Commit b9351f74 authored by tgupta6

reset the graph at the end of obj classifier training

parent e469b217
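The substantive change is the tf.reset_default_graph() call added at the end of train() in the object classifier trainer, so that a later training or evaluation stage in the same process starts from an empty default graph instead of stacking new ops and variables onto the old one. A minimal sketch of that pattern (illustrative only, not part of the commit):

import tensorflow as tf

def run_stage(stage_name):
    # each stage builds its own ops; the reset below keeps stages independent
    x = tf.placeholder(tf.float32, shape=[None, 4], name=stage_name + '_x')
    w = tf.Variable(tf.zeros([4, 2]), name=stage_name + '_w')
    y = tf.matmul(x, w)
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0, 4.0]]})
    tf.reset_default_graph()  # without this, the next call would add duplicate nodes to the same graph

run_stage('obj')
run_stage('atr')  # starts from a clean default graph thanks to the reset above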
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
import atr_data_io_helper as atr_data_loader
def train():
# Start session
sess = tf.InteractiveSession()
x, y, keep_prob = placeholder_inputs()
y_pred = comp_graph_v_2(x, y, keep_prob)
# Specify loss
cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
# Specify training method
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# Evaluator
accuracy = evaluation(y, y_pred)
# Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir
merged = tf.merge_all_summaries()
# Output dir
outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/'
if not os.path.exists(outdir):
os.mkdir(outdir)
# Training Data
img_width = 75
img_height = 75
train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
mean_image = atr_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
# Val Data
val_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
# Session Saver
saver = tf.train.Saver()
# Start Training
sess.run(tf.initialize_all_variables())
batch_size = 100
max_epoch = 10
max_iter = 95
val_acc_array_iter = np.empty([max_iter*max_epoch])
val_acc_array_epoch = np.zeros([max_epoch])
train_acc_array_epoch = np.zeros([max_epoch])
for epoch in range(max_epoch):
for i in range(max_iter):
train_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
_, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
sess.close()
if __name__=='__main__':
train()
No preview for this file type
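One caveat on the hand-rolled loss in the script above: tf.log(y_pred) returns -inf when a softmax output hits exactly zero, turning the loss into inf or NaN. A numerically safer variant (a sketch, not the author's loss) clips the probabilities first:

import tensorflow as tf

def stable_cross_entropy(y, y_pred, eps=1e-8):
    # clipping keeps the argument of tf.log strictly positive
    return -tf.reduce_sum(y * tf.log(tf.clip_by_value(y_pred, eps, 1.0)))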
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from scipy import misc
import tensorflow as tf
import object_classifiers.obj_data_io_helper as shape_data_loader
import tf_graph_creation_helper as graph_creator
import plot_helper as plotter
def eval(eval_params):
sess = tf.InteractiveSession()
x, y, keep_prob = graph_creator.placeholder_inputs()
y_pred = graph_creator.obj_comp_graph(x, keep_prob)
accuracy = graph_creator.evaluation(y, y_pred)
saver = tf.train.Saver()
saver.restore(sess, eval_params['model_name'] + '-' + str(eval_params['global_step']))
mean_image = np.load(os.path.join(eval_params['out_dir'], 'mean_image.npy'))
test_json_filename = eval_params['test_json']
image_dir = eval_params['image_dir']
html_dir = eval_params['html_dir']
if not os.path.exists(html_dir):
os.mkdir(html_dir)
html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir, 'index.html'))
    col_dict = {0: 'Ground Truth',
1: 'Prediction',
2: 'Image'}
html_writer.add_element(col_dict)
shape_dict = {0: 'blank',
1: 'rectangle',
2: 'triangle',
3: 'circle'}
batch_size = 100
correct = 0
for i in range(1):
test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000 + i * batch_size, batch_size, 75, 75)
feed_dict_test = {x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
correct = correct + result[0] * batch_size
        print(correct)
for row in range(batch_size * 9):
gt_id = np.argmax(test_batch[1][row, :])
pred_id = np.argmax(result[1][row, :])
if not gt_id == pred_id:
img_filename = os.path.join(html_dir, '{}_{}.png'.format(i, row))
misc.imsave(img_filename, test_batch[0][row, :, :, :])
col_dict = {0: shape_dict[gt_id],
1: shape_dict[pred_id],
2: html_writer.image_tag('{}_{}.png'.format(i, row), 25, 25)}
html_writer.add_element(col_dict)
html_writer.close_file()
    print('Test Accuracy: {}'.format(correct / 5000))
No preview for this file type
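As written, the evaluation loop above only pulls a single batch (range(1) with batch_size = 100) yet divides by 5000, so the printed number covers only a fraction of the test split. A sketch of aggregating over the whole split, reusing the names already defined in eval() and assuming 5000 test images starting at index 10000:

num_test_images = 5000  # assumption about the size of the test split
batch_size = 100
correct = 0.0
for i in range(num_test_images // batch_size):
    test_batch = shape_data_loader.obj_mini_batch_loader(
        test_json_filename, image_dir, mean_image,
        10000 + i * batch_size, batch_size, 75, 75)
    feed_dict_test = {x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
    correct += sess.run(accuracy, feed_dict=feed_dict_test) * batch_size
print('Test Accuracy: {}'.format(correct / num_test_images))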
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
import object_classifiers.obj_data_io_helper as shape_data_loader
import tf_graph_creation_helper as graph_creator
import plot_helper as plotter
def train(train_params):
sess = tf.InteractiveSession()
x, y, keep_prob = graph_creator.placeholder_inputs()
y_pred = graph_creator.obj_comp_graph(x, keep_prob)
cross_entropy = graph_creator.loss(y, y_pred)
train_step = tf.train.AdamOptimizer(train_params['adam_lr']).minimize(cross_entropy)
accuracy = graph_creator.evaluation(y, y_pred)
outdir = train_params['out_dir']
if not os.path.exists(outdir):
os.mkdir(outdir)
img_width = 75
img_height = 75
train_json_filename = train_params['train_json']
image_dir = train_params['image_dir']
mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
val_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
feed_dict_val = {x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
batch_size = 10
max_epoch = 2
max_iter = 1
val_acc_array_iter = np.empty([max_iter * max_epoch])
val_acc_array_epoch = np.zeros([max_epoch])
train_acc_array_epoch = np.zeros([max_epoch])
for epoch in range(max_epoch):
for i in range(max_iter):
train_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 1 + i * batch_size, batch_size, img_height, img_width)
feed_dict_train = {x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
_, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
val_acc_array_iter[i + epoch * max_iter] = accuracy.eval(feed_dict_val)
plotter.plot_accuracy(np.arange(0, i + 1 + epoch * max_iter) + 1, val_acc_array_iter[0:i + 1 + epoch * max_iter], xlim=[1, max_epoch * max_iter], ylim=[0, 1.0], savePath=os.path.join(outdir, 'valAcc_vs_iter.pdf'))
            print('Step: {} Val Accuracy: {}'.format(i + 1 + epoch * max_iter, val_acc_array_iter[i + epoch * max_iter]))
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
val_acc_array_epoch[epoch] = val_acc_array_iter[i + epoch * max_iter]
plotter.plot_accuracies(xdata=np.arange(0, epoch + 1) + 1, ydata_train=train_acc_array_epoch[0:epoch + 1], ydata_val=val_acc_array_epoch[0:epoch + 1], xlim=[1, max_epoch], ylim=[0, 1.0], savePath=os.path.join(outdir, 'acc_vs_epoch.pdf'))
save_path = saver.save(sess, os.path.join(outdir, 'obj_classifier'), global_step=epoch)
sess.close()
tf.reset_default_graph()
if __name__ == '__main__':
    # note: train() expects a train_params dict; this module is normally driven from the workflow script
    train()
No preview for this file type
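Saving with global_step=epoch makes tf.train.Saver append the step to the checkpoint name, which is exactly the string the evaluator rebuilds as eval_params['model_name'] + '-' + str(eval_params['global_step']) before saver.restore(). A throwaway sketch of that naming (out_dir is whatever scratch directory you point it at):

import os
import tensorflow as tf

def checkpoint_naming_demo(out_dir):
    v = tf.Variable(0.0, name='v')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        path = saver.save(sess, os.path.join(out_dir, 'obj_classifier'), global_step=1)
        print(path)  # <out_dir>/obj_classifier-1
    tf.reset_default_graph()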
# Embedded file name: /home/tanmay/Code/GenVQA/GenVQA/classifiers/plot_helper.py
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
def plot_accuracy(xdata, ydata, xlim = None, ylim = None, savePath = None):
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(xdata, ydata)
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    if savePath is not None:
fig.savefig(savePath)
plt.close(fig)
def plot_accuracies(xdata, ydata_train, ydata_val, xlim = None, ylim = None, savePath = None):
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(xdata, ydata_train, xdata, ydata_val)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(['Train', 'Val'], loc='lower right')
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    if savePath is not None:
fig.savefig(savePath)
plt.close(fig)
No preview for this file type
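A minimal usage sketch for the two plotting helpers above (synthetic accuracy curves, arbitrary output paths):

import numpy as np

epochs = np.arange(1, 11)
train_acc = np.linspace(0.4, 0.95, 10)  # made-up curves, purely for illustration
val_acc = np.linspace(0.35, 0.85, 10)
plot_accuracy(epochs, val_acc, xlim=[1, 10], ylim=[0, 1.0], savePath='valAcc_demo.pdf')
plot_accuracies(xdata=epochs, ydata_train=train_acc, ydata_val=val_acc,
                xlim=[1, 10], ylim=[0, 1.0], savePath='acc_demo.pdf')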
import numpy as np
import tensorflow as tf
def weight_variable(shape, var_name = 'W'):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=var_name)
def bias_variable(shape, var_name = 'b'):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=var_name)
def conv2d(x, W, var_name = 'W'):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME', name=var_name)
def max_pool_2x2(x, var_name = 'h_pool'):
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME', name=var_name)
def placeholder_inputs(mode = 'gt'):
x = tf.placeholder(tf.float32, shape=[None,25,25,3])
keep_prob = tf.placeholder(tf.float32)
if mode == 'gt':
        print('Creating placeholder for ground truth')
y = tf.placeholder(tf.float32, shape=[None, 4])
return (x, y, keep_prob)
if mode == 'no_gt':
        print('No placeholder for ground truth')
return (x, keep_prob)
def obj_comp_graph(x, keep_prob):
with tf.name_scope('obj') as obj_graph:
with tf.name_scope('conv1') as conv1:
W_conv1 = weight_variable([5,5,3,4])
b_conv1 = bias_variable([4])
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1, name='h')
h_pool1 = max_pool_2x2(h_conv1)
h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob, name='h_pool_drop')
with tf.name_scope('conv2') as conv2:
W_conv2 = weight_variable([3,3,4,8])
b_conv2 = bias_variable([8])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='h')
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_drop = tf.nn.dropout(h_pool2, keep_prob, name='h_pool_drop')
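# with the 25x25x3 placeholder input, two SAME-padded 2x2 max-pools give 13x13 then 7x7
# feature maps, so the 8-channel conv2 output flattens to 7 * 7 * 8 = 392 features per example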
h_pool2_drop_flat = tf.reshape(h_pool2_drop, [-1, 392], name='h_pool_drop_flat')
with tf.name_scope('fc1') as fc1:
W_fc1 = weight_variable([392, 4])
b_fc1 = bias_variable([4])
y_pred = tf.nn.softmax(tf.matmul(h_pool2_drop_flat, W_fc1) + b_fc1)
tf.add_to_collection('obj_feat', h_pool2_drop_flat)
return y_pred
def atr_comp_graph(x, keep_prob, obj_feat):
with tf.name_scope('atr') as obj_graph:
with tf.name_scope('conv1') as conv1:
W_conv1 = weight_variable([5,5,3,4])
b_conv1 = bias_variable([4])
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1, name='h')
h_pool1 = max_pool_2x2(h_conv1)
h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob, name='h_pool_drop')
with tf.name_scope('conv2') as conv2:
W_conv2 = weight_variable([3,3,4,8])
b_conv2 = bias_variable([8])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='h')
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_drop = tf.nn.dropout(h_pool2, keep_prob, name='h_pool_drop')
h_pool2_drop_flat = tf.reshape(h_pool2_drop, [-1, 392], name='h_pool_drop_flat')
with tf.name_scope('fc1') as fc1:
W_obj_fc1 = weight_variable([392, 4], var_name='W_obj')
W_atr_fc1 = weight_variable([392, 4], var_name='W_atr')
b_fc1 = bias_variable([4])
y_pred = tf.nn.softmax(tf.matmul(h_pool2_drop_flat, W_atr_fc1) + tf.matmul(obj_feat, W_obj_fc1) + b_fc1)
tf.add_to_collection('atr_feat', h_pool2_drop_flat)
return y_pred
def evaluation(y, y_pred):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1), name='correct_prediction')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
return accuracy
def loss(y, y_pred):
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred), name='cross_entropy')
return cross_entropy
if __name__ == '__main__':
lg_dir = '/home/tanmay/Code/GenVQA/Exp_Results/lg_files/'
g = tf.Graph()
with g.as_default():
x, y, keep_prob = placeholder_inputs(mode='gt')
y_pred = obj_comp_graph(x, keep_prob)
obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
y_pred2 = atr_comp_graph(x, keep_prob, obj_feat[0])
accuracy = evaluation(y, y_pred2)
accuracy_summary = tf.scalar_summary('accuracy', accuracy)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
merged = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(lg_dir, graph_def=g.as_graph_def())
result = sess.run([merged, y_pred], feed_dict={x: np.random.rand(10, 25, 25, 3),
y: np.random.rand(10, 4),
keep_prob: 1.0})
summary_writer.add_summary(result[0], 1)
No preview for this file type
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
import object_classifiers.train_obj_classifier as obj_trainer
import object_classifiers.eval_obj_classifier as obj_evaluator
workflow = {
'train_obj': False,
'eval_obj': False,
}
train_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier',
'adam_lr': 0.001,
'train_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
}
eval_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier',
'model_name': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/obj_classifier',
'global_step': 1,
'test_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
'html_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/html_dir',
'create_graph': False,
}
if __name__=='__main__':
if workflow['train_obj']:
obj_trainer.train(train_params)
obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
print(obj_feat)
if workflow['eval_obj']:
obj_evaluator.eval(eval_params)
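Both workflow flags default to False, so running the script as-is is a no-op. Also worth noting: with the graph reset added in this commit, the tf.get_collection('obj_feat', ...) lookup right after training returns an empty list, because the collection lived in the graph that train() just discarded. A minimal invocation sketch (same dicts as above, only the flags flipped):

workflow['train_obj'] = True   # writes obj_classifier-<epoch> checkpoints into train_params['out_dir']
workflow['eval_obj'] = True    # eval_params['global_step'] = 1 matches the last epoch the trainer saves (max_epoch = 2)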