diff --git a/classifiers/attribute_classifiers/#eval_atr_classifier.py# b/classifiers/attribute_classifiers/#eval_atr_classifier.py# deleted file mode 100644 index 00debb0f950577dc0f57f85397dcb32a8a92aeb3..0000000000000000000000000000000000000000 --- a/classifiers/attribute_classifiers/#eval_atr_classifier.py# +++ /dev/null @@ -1,90 +0,0 @@ -import sys -import json -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -from scipy import misc -import tensorflow as tf -import atr_data_io_helper as atr_data_loader -import tf_graph_creation_helper as graph_creator -import plot_helper as plotter - -def eval(eval_params): - sess=tf.InteractiveSession() - - x, y, keep_prob = graph_creator.placeholder_inputs() - _ = graph_creator.obj_comp_graph(x, 1.0) - g = tf.get_default_graph() - obj_feat = g.get_operation_by_name('obj/conv2/obj_feat') - y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat.outputs[0]) - accuracy = graph_creator.evaluation(y, y_pred) - - # Object model restorer - vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \ - tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr') - print('Variables to restore:') - print([var.name for var in vars_to_restore]) - - saver = tf.train.Saver(vars_to_restore) - saver.restore(sess, eval_params['model_name'] + '-' + \ - str(eval_params['global_step'])) - - mean_image = np.load(os.path.join(eval_params['out_dir'], 'mean_image.npy')) - test_json_filename = eval_params['test_json'] - with open(test_json_filename, 'r') as json_file: - raw_json_data = json.load(json_file) - test_json_data = dict() - for entry in raw_json_data: - if entry['image_id'] not in test_json_data: - test_json_data[entry['image_id']]=entry['config'] - - image_dir = eval_params['image_dir'] - html_dir = eval_params['html_dir'] - if not os.path.exists(html_dir): - os.mkdir(html_dir) - - html_writer = atr_data_loader \ - .html_atr_table_writer(os.path.join(html_dir, 'index.html')) - - col_dict = { - 0: 'Grount Truth', - 1: 'Prediction', - 2: 'Image'} - - html_writer.add_element(col_dict) - - color_dict = { - 0: 'red', - 1: 'green', - 2: 'blue', - 3: 'blank'} - - batch_size = 100 - correct = 0 - for i in range(50): - test_batch = atr_data_loader\ - .atr_mini_batch_loader(test_json_data, image_dir, mean_image, - 10000+i*batch_size, batch_size, 75, 75) - feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0} - result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test) - correct = correct + result[0]*batch_size - print(correct) - - for row in range(batch_size*9): - gt_id = np.argmax(test_batch[1][row,:]) - pred_id = np.argmax(result[1][row, :]) - if not gt_id==pred_id: - img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row)) - misc.imsave(img_filename, test_batch[0][row,:,:,:] + mean_image) - col_dict = { - 0: color_dict[gt_id], - 1: color_dict[pred_id], - 2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)} - html_writer.add_element(col_dict) - - html_writer.close_file() - print('Test Accuracy: {}'.format(correct / 5000)) - - sess.close() - tf.reset_default_graph() diff --git a/classifiers/attribute_classifiers/.#eval_atr_classifier.py b/classifiers/attribute_classifiers/.#eval_atr_classifier.py deleted file mode 120000 index a7436f7a65f3e6f88d5ffc73b0dbd29c105d536d..0000000000000000000000000000000000000000 --- a/classifiers/attribute_classifiers/.#eval_atr_classifier.py +++ /dev/null @@ -1 +0,0 @@ -tanmay@crunchy.29101:1450461082 \ No newline at end of file diff --git 
a/classifiers/attribute_classifiers/eval_atr_classifier.py b/classifiers/attribute_classifiers/eval_atr_classifier.py index da1b343663ac50a438fcd0493029a1973f4c75ca..00debb0f950577dc0f57f85397dcb32a8a92aeb3 100644 --- a/classifiers/attribute_classifiers/eval_atr_classifier.py +++ b/classifiers/attribute_classifiers/eval_atr_classifier.py @@ -55,7 +55,7 @@ def eval(eval_params): html_writer.add_element(col_dict) color_dict = { - 0: 'red', # blanks are treated as red + 0: 'red', 1: 'green', 2: 'blue', 3: 'blank'} diff --git a/color_classifiers/__init__.py b/color_classifiers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/color_classifiers/atr_data_io_helper.py b/color_classifiers/atr_data_io_helper.py deleted file mode 100644 index a68f75e533f9009d3129a5d5a2d9233591856f88..0000000000000000000000000000000000000000 --- a/color_classifiers/atr_data_io_helper.py +++ /dev/null @@ -1,90 +0,0 @@ -import json -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -from scipy import misc - -def atr_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3): - - with open(json_filename, 'r') as json_file: - json_data = json.load(json_file) - - atr_images = np.empty(shape=[9*batch_size, img_height/3, img_width/3, channels]) - atr_labels = np.zeros(shape=[9*batch_size, 4]) - - for i in range(start_index, start_index + batch_size): - image_name = os.path.join(image_dir, str(i) + '.jpg') - image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest') -# image.resize((img_height, img_width, 3)) - crop_shape = np.array([image.shape[0], image.shape[1]])/3 - selected_anno = [q for q in json_data if q['image_id']==i] - grid_config = selected_anno[0]['config'] - - counter = 0; - for grid_row in range(0,3): - for grid_col in range(0,3): - start_row = grid_row*crop_shape[0] - start_col = grid_col*crop_shape[1] -# print([start_row, start_col]) - cropped_image = image[start_row:start_row+crop_shape[0], start_col:start_col+crop_shape[1], :] - if np.ndim(mean_image)==0: - atr_images[9*(i-start_index)+counter,:,:,:] = cropped_image/254. 
- else: - atr_images[9*(i-start_index)+counter,:,:,:] = (cropped_image-mean_image)/254 - atr_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col+1]] = 1 - counter = counter + 1 - - # imgplot = plt.imshow(obj_images[0,:,:,:].astype(np.uint8)) - # plt.show() - return (atr_images, atr_labels) - -def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3): - batch = atr_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels) - mean_image = np.mean(batch[0], 0) - return mean_image - -def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3): - max_iter = np.floor(num_images/batch_size) - mean_image = np.zeros([img_height/3, img_width/3, channels]) - for i in range(max_iter.astype(np.int16)): - mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels) - - mean_image = mean_image/max_iter - tmp_mean_image = mean_image*254 - # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8)) - # plt.show() - return mean_image - - -class html_atr_table_writer(): - def __init__(self, filename): - self.filename = filename - self.html_file = open(self.filename, 'w') - self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""") - - def add_element(self, col_dict): - self.html_file.write(' <tr>\n') - for key in range(len(col_dict)): - self.html_file.write(""" <td>{}</td>\n""".format(col_dict[key])) - self.html_file.write(' </tr>\n') - - def image_tag(self, image_path, height, width): - return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width) - - def close_file(self): - self.html_file.write('</table>\n</body>\n</html>') - self.html_file.close() - - - - -if __name__=="__main__": - - html_writer = html_atr_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/trial.html') - col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)} - html_writer.add_element(col_dict) - html_writer.close_file() - diff --git a/color_classifiers/atr_data_io_helper.pyc b/color_classifiers/atr_data_io_helper.pyc deleted file mode 100644 index f1e7f0bf9e4b06fc1016b65356fdae76ff343f8e..0000000000000000000000000000000000000000 Binary files a/color_classifiers/atr_data_io_helper.pyc and /dev/null differ diff --git a/color_classifiers/eval_atr_classifier.py b/color_classifiers/eval_atr_classifier.py deleted file mode 100644 index c6a14dd52a072f22e3c85a1b815501b79ce6b675..0000000000000000000000000000000000000000 --- a/color_classifiers/eval_atr_classifier.py +++ /dev/null @@ -1,69 +0,0 @@ -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -from scipy import misc -import tensorflow as tf -import atr_data_io_helper as atr_data_loader -from train_atr_classifier import placeholder_inputs, comp_graph_v_2, evaluation - -sess=tf.InteractiveSession() - -x, y, keep_prob = placeholder_inputs() -y_pred = comp_graph_v_2(x, y, keep_prob) - -accuracy = evaluation(y, y_pred) - -saver = tf.train.Saver() - -saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/obj_classifier_1.ckpt') - -mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/mean_image.npy') - -# Test Data -test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json' -image_dir = 
'/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - -# Base dir for html visualizer -html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/html' -if not os.path.exists(html_dir): - os.mkdir(html_dir) - -# HTML file writer -html_writer = atr_data_loader.html_atr_table_writer(os.path.join(html_dir,'index.html')) -col_dict={ - 0: 'Grount Truth', - 1: 'Prediction', - 2: 'Image'} -html_writer.add_element(col_dict) - -shape_dict = { - 0: 'blank', - 1: 'rectangle', - 2: 'triangle', - 3: 'circle'} - -batch_size = 100 -correct = 0 -for i in range(50): - test_batch = atr_data_loader.atr_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75) - feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0} - result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test) - correct = correct + result[0]*batch_size - print(correct) - - for row in range(batch_size*9): - gt_id = np.argmax(test_batch[1][row,:]) - pred_id = np.argmax(result[1][row, :]) - if not gt_id==pred_id: - img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row)) - misc.imsave(img_filename, test_batch[0][row,:,:,:]) - col_dict = { - 0: shape_dict[gt_id], - 1: shape_dict[pred_id], - 2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)} - html_writer.add_element(col_dict) - -html_writer.close_file() -print('Test Accuracy: {}'.format(correct/5000)) diff --git a/color_classifiers/train_atr_classifier.py b/color_classifiers/train_atr_classifier.py deleted file mode 100644 index 1bc622a5cebe9cc8df1927d22039b06a3f571656..0000000000000000000000000000000000000000 --- a/color_classifiers/train_atr_classifier.py +++ /dev/null @@ -1,196 +0,0 @@ -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -import atr_data_io_helper as atr_data_loader - -def plot_accuracy(xdata, ydata, xlim=None, ylim=None, savePath=None): - fig, ax = plt.subplots( nrows=1, ncols=1 ) - ax.plot(xdata, ydata) - plt.xlabel('Iterations') - plt.ylabel('Accuracy') - - if not xlim==None: - plt.xlim(xlim) - - if not ylim==None: - plt.ylim(ylim) - - if not savePath==None: - fig.savefig(savePath) - - - plt.close(fig) - -def plot_accuracies(xdata, ydata_train, ydata_val, xlim=None, ylim=None, savePath=None): - fig, ax = plt.subplots( nrows=1, ncols=1 ) - ax.plot(xdata, ydata_train, xdata, ydata_val) - plt.xlabel('Epochs') - plt.ylabel('Accuracy') - plt.legend(['Train', 'Val'], loc='lower right') - if not xlim==None: - plt.xlim(xlim) - - if not ylim==None: - plt.ylim(ylim) - - if not savePath==None: - fig.savefig(savePath) - - - plt.close(fig) - - -def weight_variable(shape): - initial = tf.truncated_normal(shape, stddev=0.1) - return tf.Variable(initial) - -def bias_variable(shape): - initial = tf.constant(0.1, shape=shape) - return tf.Variable(initial) - -def conv2d(x, W): - return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') - -def max_pool_2x2(x): - return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') - -def max_pool_4x4(x): - return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME') - -def placeholder_inputs(): - # Specify placeholder_inputs - x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3]) - y = tf.placeholder(tf.float32, shape=[None, 4]) - keep_prob = tf.placeholder(tf.float32) - return x, y, keep_prob - -def comp_graph_v_1(x, y, keep_prob): - # Specify computation graph - W_conv1 = weight_variable([5, 5, 3, 10]) - b_conv1 = 
bias_variable([10]) - - h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1) - - h_pool1 = max_pool_2x2(h_conv1) - #print(tf.Tensor.get_shape(h_pool1)) - - W_fc1 = weight_variable([13*13*10, 4]) - b_fc1 = bias_variable([4]) - - h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10]) - h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob) - - y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1) - - return y_pred - -def comp_graph_v_2(x, y, keep_prob): - # Specify computation graph - W_conv1 = weight_variable([5, 5, 3, 4]) - b_conv1 = bias_variable([4]) - - h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1) - h_pool1 = max_pool_2x2(h_conv1) - h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob) - - W_conv2 = weight_variable([3, 3, 4, 8]) - b_conv2 = bias_variable([8]) - - h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) - h_pool2 = max_pool_2x2(h_conv2) - - W_fc1 = weight_variable([7*7*8, 4]) - b_fc1 = bias_variable([4]) - - h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*8]) - h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob) - - y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat_drop,W_fc1) + b_fc1) - - return y_pred - -def evaluation(y, y_pred): - # Evaluation function - correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_pred,1)) - accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) - #tf.scalar_summary("accuracy", accuracy) - - return accuracy - - -def train(): - # Start session - sess = tf.InteractiveSession() - - x, y, keep_prob = placeholder_inputs() - y_pred = comp_graph_v_2(x, y, keep_prob) - - # Specify loss - cross_entropy = -tf.reduce_sum(y*tf.log(y_pred)) - - # Specify training method - train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy) - - # Evaluator - accuracy = evaluation(y, y_pred) - - # Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir - merged = tf.merge_all_summaries() - - # Output dir - outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/' - if not os.path.exists(outdir): - os.mkdir(outdir) - - # Training Data - img_width = 75 - img_height = 75 - train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json' - image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - mean_image = atr_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width) - np.save(os.path.join(outdir, 'mean_image.npy'), mean_image) - - # Val Data - val_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width) - feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0} - - # Session Saver - saver = tf.train.Saver() - - # Start Training - sess.run(tf.initialize_all_variables()) - batch_size = 100 - max_epoch = 10 - max_iter = 95 - val_acc_array_iter = np.empty([max_iter*max_epoch]) - val_acc_array_epoch = np.zeros([max_epoch]) - train_acc_array_epoch = np.zeros([max_epoch]) - for epoch in range(max_epoch): - for i in range(max_iter): - train_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width) - feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5} - - _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train) - - train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc - val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val) - print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, 
val_acc_array_iter[i+epoch*max_iter])) - plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf')) - - - train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter - val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter] - - plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf')) - - save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch))) - - - sess.close() - -if __name__=='__main__': - train() - diff --git a/color_classifiers/train_atr_classifier.pyc b/color_classifiers/train_atr_classifier.pyc deleted file mode 100644 index 513dd9fab63523be1e847885b6c07fe3093f399c..0000000000000000000000000000000000000000 Binary files a/color_classifiers/train_atr_classifier.pyc and /dev/null differ diff --git a/object_classifiers/#eval_obj_classifier.py# b/object_classifiers/#eval_obj_classifier.py# deleted file mode 100644 index 28fa50eefc4a383fe4c3ff5c65c47c6f69069e74..0000000000000000000000000000000000000000 --- a/object_classifiers/#eval_obj_classifier.py# +++ /dev/null @@ -1,45 +0,0 @@ -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -import obj_data_io_helper as shape_data_loader -from train_obj_classifier import placeholder_inputs, comp_graph_v_1, evaluation - -sess=tf.InteractiveSession() - -x, y, keep_prob = placeholder_inputs() -y_pred = comp_graph_v_1(x, y, keep_prob) - -accuracy = evaluation(y, y_pred) - -saver = tf.train.Saver() - -saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/obj_classifier_9.ckpt') - -mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/mean_image.npy') - -# Test Data -test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json' -image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - -# HTML file writer -html_writer = shape_data_loader.html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html') -batch_size = 100 -correct = 0 -for i in range(1): #50 - test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75) - feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0} - result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test) - correct = correct + result[0]*batch_size - print(correct) - - for row in range(batch_size): - col_dict = { - 0: test_batch[1][row,:], - 1: y_pred[row, :]} - html_writer.add_element(col_dict) - -html_writer.close_file() -print('Test Accuracy: {}'.format(correct/5000)) diff --git a/object_classifiers/__init__.py b/object_classifiers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/object_classifiers/eval_obj_classifier.py b/object_classifiers/eval_obj_classifier.py deleted file mode 100644 index 9853116cdccde23b3e043b83a53044c64d7ae585..0000000000000000000000000000000000000000 --- a/object_classifiers/eval_obj_classifier.py +++ /dev/null @@ -1,69 +0,0 @@ -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -from scipy import misc -import tensorflow as tf 
-import obj_data_io_helper as shape_data_loader -from train_obj_classifier import placeholder_inputs, comp_graph_v_1, comp_graph_v_2, evaluation - -sess=tf.InteractiveSession() - -x, y, keep_prob = placeholder_inputs() -y_pred = comp_graph_v_2(x, y, keep_prob) - -accuracy = evaluation(y, y_pred) - -saver = tf.train.Saver() - -saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/obj_classifier_1.ckpt') - -mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/mean_image.npy') - -# Test Data -test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json' -image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - -# Base dir for html visualizer -html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/html' -if not os.path.exists(html_dir): - os.mkdir(html_dir) - -# HTML file writer -html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir,'index.html')) -col_dict={ - 0: 'Grount Truth', - 1: 'Prediction', - 2: 'Image'} -html_writer.add_element(col_dict) - -shape_dict = { - 0: 'blank', - 1: 'rectangle', - 2: 'triangle', - 3: 'circle'} - -batch_size = 100 -correct = 0 -for i in range(50): - test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75) - feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0} - result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test) - correct = correct + result[0]*batch_size - print(correct) - - for row in range(batch_size*9): - gt_id = np.argmax(test_batch[1][row,:]) - pred_id = np.argmax(result[1][row, :]) - if not gt_id==pred_id: - img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row)) - misc.imsave(img_filename, test_batch[0][row,:,:,:]) - col_dict = { - 0: shape_dict[gt_id], - 1: shape_dict[pred_id], - 2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)} - html_writer.add_element(col_dict) - -html_writer.close_file() -print('Test Accuracy: {}'.format(correct/5000)) diff --git a/object_classifiers/obj_data_io_helper.py b/object_classifiers/obj_data_io_helper.py deleted file mode 100644 index d0427abb6d1f3d56ec3d99e5a3b913adc7177959..0000000000000000000000000000000000000000 --- a/object_classifiers/obj_data_io_helper.py +++ /dev/null @@ -1,90 +0,0 @@ -import json -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -from scipy import misc - -def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3): - - with open(json_filename, 'r') as json_file: - json_data = json.load(json_file) - - obj_images = np.empty(shape=[9*batch_size, img_height/3, img_width/3, channels]) - obj_labels = np.zeros(shape=[9*batch_size, 4]) - - for i in range(start_index, start_index + batch_size): - image_name = os.path.join(image_dir, str(i) + '.jpg') - image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest') -# image.resize((img_height, img_width, 3)) - crop_shape = np.array([image.shape[0], image.shape[1]])/3 - selected_anno = [q for q in json_data if q['image_id']==i] - grid_config = selected_anno[0]['config'] - - counter = 0; - for grid_row in range(0,3): - for grid_col in range(0,3): - start_row = grid_row*crop_shape[0] - start_col = grid_col*crop_shape[1] -# print([start_row, start_col]) - cropped_image = image[start_row:start_row+crop_shape[0], 
start_col:start_col+crop_shape[1], :] - if np.ndim(mean_image)==0: - obj_images[9*(i-start_index)+counter,:,:,:] = cropped_image/254. - else: - obj_images[9*(i-start_index)+counter,:,:,:] = (cropped_image-mean_image)/254 - obj_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col]] = 1 - counter = counter + 1 - - # imgplot = plt.imshow(obj_images[0,:,:,:].astype(np.uint8)) - # plt.show() - return (obj_images, obj_labels) - -def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3): - batch = obj_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels) - mean_image = np.mean(batch[0], 0) - return mean_image - -def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3): - max_iter = np.floor(num_images/batch_size) - mean_image = np.zeros([img_height/3, img_width/3, channels]) - for i in range(max_iter.astype(np.int16)): - mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels) - - mean_image = mean_image/max_iter - tmp_mean_image = mean_image*254 - # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8)) - # plt.show() - return mean_image - - -class html_obj_table_writer(): - def __init__(self, filename): - self.filename = filename - self.html_file = open(self.filename, 'w') - self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""") - - def add_element(self, col_dict): - self.html_file.write(' <tr>\n') - for key in range(len(col_dict)): - self.html_file.write(""" <td>{}</td>\n""".format(col_dict[key])) - self.html_file.write(' </tr>\n') - - def image_tag(self, image_path, height, width): - return """<img src="{}" alt="IMAGE NOT FOUND!" 
height={} width={}>""".format(image_path,height,width) - - def close_file(self): - self.html_file.write('</table>\n</body>\n</html>') - self.html_file.close() - - - - -if __name__=="__main__": - - html_writer = html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html') - col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)} - html_writer.add_element(col_dict) - html_writer.close_file() - diff --git a/object_classifiers/obj_data_io_helper.pyc b/object_classifiers/obj_data_io_helper.pyc deleted file mode 100644 index 48169a55a825c93a57adb1cf4cc9cfeb258f4b15..0000000000000000000000000000000000000000 Binary files a/object_classifiers/obj_data_io_helper.pyc and /dev/null differ diff --git a/object_classifiers/obj_data_io_helper.py~ b/object_classifiers/obj_data_io_helper.py~ deleted file mode 100644 index c550af7fd9353169527b007eabe08e1badd2137b..0000000000000000000000000000000000000000 --- a/object_classifiers/obj_data_io_helper.py~ +++ /dev/null @@ -1,48 +0,0 @@ -import json -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np - - -def obj_mini_batch_loader(json_filename, batch_size, start_index): - with open(json_filename, 'r') as json_file: - json_data = json.load(json_file) - - obj_images = [] - obj_labels = [] - - for i in range(start_index, start_index + batch_size): - image_name = os.path.join(image_dir, str(i+1) + '.jpg') - image = mpimg.imread(image_name) - crop_shape = [image.shape[0]/3, image.shape[1]/3] - selected_anno = [q for q in json_data if q['image_id']==1] - grid_config = selected_anno[0]['config'] - - counter = 0; - for grid_row in range(0,3): - for grid_col in range(0,3): - start_row = grid_row*crop_shape[0] - start_col = grid_col*crop_shape[1] - print([start_row, start_col]) - obj_images.append(image[start_row:start_row+crop_shape[0]-1, start_col:start_col+crop_shape[1]-1, :]) - obj_labels.append(grid_config[6*grid_row+2*grid_col]) - # imgplot = plt.imshow(obj_images[counter]) - # plt.show() - counter = counter + 1 - - - - - -if __name__=="__main__": - - json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json' - batch_size = 1 - start_index = 0 - - image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - - obj_mini_batch_loader(json_filename, batch_size, start_index) - diff --git a/object_classifiers/train_obj_classifier.py b/object_classifiers/train_obj_classifier.py deleted file mode 100644 index 19b48a25fa3dcf668d8c2350a0e20538d1eb934e..0000000000000000000000000000000000000000 --- a/object_classifiers/train_obj_classifier.py +++ /dev/null @@ -1,197 +0,0 @@ -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -import obj_data_io_helper as shape_data_loader - -def plot_accuracy(xdata, ydata, xlim=None, ylim=None, savePath=None): - fig, ax = plt.subplots( nrows=1, ncols=1 ) - ax.plot(xdata, ydata) - plt.xlabel('Iterations') - plt.ylabel('Accuracy') - - if not xlim==None: - plt.xlim(xlim) - - if not ylim==None: - plt.ylim(ylim) - - if not savePath==None: - fig.savefig(savePath) - - - plt.close(fig) - -def plot_accuracies(xdata, ydata_train, ydata_val, xlim=None, ylim=None, savePath=None): - fig, ax = plt.subplots( nrows=1, ncols=1 ) - ax.plot(xdata, ydata_train, xdata, ydata_val) - plt.xlabel('Epochs') - plt.ylabel('Accuracy') - plt.legend(['Train', 'Val'], loc='lower right') - if not xlim==None: - plt.xlim(xlim) - - if not ylim==None: - 
plt.ylim(ylim) - - if not savePath==None: - fig.savefig(savePath) - - - plt.close(fig) - - -def weight_variable(shape): - initial = tf.truncated_normal(shape, stddev=0.1) - return tf.Variable(initial) - -def bias_variable(shape): - initial = tf.constant(0.1, shape=shape) - return tf.Variable(initial) - -def conv2d(x, W): - return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') - -def max_pool_2x2(x): - return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') - -def max_pool_4x4(x): - return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME') - -def placeholder_inputs(): - # Specify placeholder_inputs - x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3]) - y = tf.placeholder(tf.float32, shape=[None, 4]) - keep_prob = tf.placeholder(tf.float32) - return x, y, keep_prob - -def comp_graph_v_1(x, y, keep_prob): - # Specify computation graph - W_conv1 = weight_variable([5, 5, 3, 10]) - b_conv1 = bias_variable([10]) - - h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1) - - h_pool1 = max_pool_2x2(h_conv1) - #print(tf.Tensor.get_shape(h_pool1)) - - W_fc1 = weight_variable([13*13*10, 4]) - b_fc1 = bias_variable([4]) - - h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10]) - h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob) - - y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1) - - return y_pred - -def comp_graph_v_2(x, y, keep_prob): - # Specify computation graph - W_conv1 = weight_variable([5, 5, 3, 4]) - b_conv1 = bias_variable([4]) - - h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1) - h_pool1 = max_pool_2x2(h_conv1) - h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob) - - W_conv2 = weight_variable([3, 3, 4, 8]) - b_conv2 = bias_variable([8]) - - h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) - h_pool2 = max_pool_2x2(h_conv2) - - W_fc1 = weight_variable([7*7*8, 4]) - b_fc1 = bias_variable([4]) - - h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*8]) - h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob) - - y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat_drop,W_fc1) + b_fc1) - - return y_pred - -def evaluation(y, y_pred): - # Evaluation function - correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_pred,1)) - accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) - #tf.scalar_summary("accuracy", accuracy) - - return accuracy - - -def train(): - # Start session - sess = tf.InteractiveSession() - - x, y, keep_prob = placeholder_inputs() - y_pred = comp_graph_v_2(x, y, keep_prob) - - # Specify loss - cross_entropy = -tf.reduce_sum(y*tf.log(y_pred)) - - # Specify training method - train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy) - - # Evaluator - accuracy = evaluation(y, y_pred) - - # Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir - merged = tf.merge_all_summaries() - #writer = tf.train.SummaryWriter("/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier", graph_def=tf.GraphDef()) - - # Output dir - outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/' - if not os.path.exists(outdir): - os.mkdir(outdir) - - # Training Data - img_width = 75 - img_height = 75 - train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json' - image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images' - mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width) - np.save(os.path.join(outdir, 'mean_image.npy'), mean_image) - - # Val Data - val_batch = 
shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width) - feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0} - - # Session Saver - saver = tf.train.Saver() - - # Start Training - sess.run(tf.initialize_all_variables()) - batch_size = 10 - max_epoch = 2 - max_iter = 950 - val_acc_array_iter = np.empty([max_iter*max_epoch]) - val_acc_array_epoch = np.zeros([max_epoch]) - train_acc_array_epoch = np.zeros([max_epoch]) - for epoch in range(max_epoch): - for i in range(max_iter): - train_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width) - feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5} - - _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train) - - train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc - val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val) - print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter])) - plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf')) - - - train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter - val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter] - - plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf')) - - save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch))) - - - sess.close() - -if __name__=='__main__': - train() - diff --git a/object_classifiers/train_obj_classifier.pyc b/object_classifiers/train_obj_classifier.pyc deleted file mode 100644 index 550d73d4e5c289d02061f24457f8bc52c638aaee..0000000000000000000000000000000000000000 Binary files a/object_classifiers/train_obj_classifier.pyc and /dev/null differ
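
Note on the removed loaders: the deleted obj_mini_batch_loader and atr_mini_batch_loader both implement the same 3x3 grid cropping and one-hot labelling, which is why the evaluation loops above index batch_size*9 rows per batch. The sketch below restates that logic with NumPy only, as a minimal illustration rather than the repository's API. It assumes, based on the indexing in the deleted code, that each annotation's 'config' list stores the shape id of cell (row, col) at index 6*row + 2*col and its colour id at the following index; the /254 scaling and optional mean-image subtraction mirror the deleted helpers.

# Minimal, self-contained sketch of the grid cropping and label construction
# used by the deleted data loaders. The config layout and normalisation are
# assumptions read off the deleted code, not a documented spec.
import numpy as np

NUM_CLASSES = 4  # blank / rectangle / triangle / circle (shapes), 4 colours likewise

def crop_grid_cells(image, grid_config, mean_image=None):
    """Split one dataset image into its nine grid cells.

    image       : H x W x 3 array with H and W divisible by 3 (e.g. 75 x 75)
    grid_config : flat annotation list; cell (row, col) stores its shape id
                  at 6*row + 2*col and its colour id at the next index
    mean_image  : optional per-pixel mean of one cell; subtracted before the
                  /254 scaling used throughout the repository
    Returns (cells, shape_labels, colour_labels) with shapes
    (9, H/3, W/3, 3), (9, 4), (9, 4).
    """
    cell_h, cell_w = image.shape[0] // 3, image.shape[1] // 3
    cells = np.empty((9, cell_h, cell_w, 3), dtype=np.float32)
    shape_labels = np.zeros((9, NUM_CLASSES), dtype=np.float32)
    colour_labels = np.zeros((9, NUM_CLASSES), dtype=np.float32)
    k = 0
    for row in range(3):
        for col in range(3):
            crop = image[row * cell_h:(row + 1) * cell_h,
                         col * cell_w:(col + 1) * cell_w, :].astype(np.float32)
            if mean_image is not None:
                crop = crop - mean_image
            cells[k] = crop / 254.0
            shape_labels[k, grid_config[6 * row + 2 * col]] = 1.0
            colour_labels[k, grid_config[6 * row + 2 * col + 1]] = 1.0
            k += 1
    return cells, shape_labels, colour_labels

A batch loader then simply stacks these nine-cell blocks for consecutive image ids, which matches the (9*batch_size, 25, 25, 3) tensors fed to the 25x25 input placeholders in the deleted training and evaluation scripts.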