Commit 99ac4c72 authored by tgupta6

Modified code to use variable name scoping

parent 39219523
Showing changed files with 246 additions and 61 deletions
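The substantive change in this commit is that the attribute network is built under its own variable scope, so the trainer can collect just the 'atr'-scoped variables and hand only those to the optimizer while the restored object network stays frozen (see the train_atr_classifier.py diff below). A minimal sketch of that pattern against the TF 0.x API these files use; the toy graph and names here are illustrative, not the project's actual graph:

import tensorflow as tf

# Illustrative only: create a variable under an 'atr' scope, mirroring how
# atr_comp_graph scopes its weights in the diff below.
x = tf.placeholder(tf.float32, shape=[None, 4])
with tf.variable_scope('atr'):
    w = tf.get_variable('w', shape=[4, 2])
y_pred = tf.matmul(x, w)
loss = tf.reduce_mean(tf.square(y_pred))

# Collect only the 'atr'-scoped variables and optimize just those,
# leaving any other (e.g. pretrained 'obj') variables untouched.
vars_to_opt = tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
train_step = tf.train.AdamOptimizer(0.001).minimize(loss, var_list=vars_to_opt)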
File added
import json
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from scipy import misc
def atr_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3):
    with open(json_filename, 'r') as json_file:
        json_data = json.load(json_file)

    atr_images = np.empty(shape=[9*batch_size, img_height//3, img_width//3, channels])
    atr_labels = np.zeros(shape=[9*batch_size, 4])

    for i in range(start_index, start_index + batch_size):
        image_name = os.path.join(image_dir, str(i) + '.jpg')
        image = misc.imresize(mpimg.imread(image_name), (img_height, img_width), interp='nearest')
        crop_shape = np.array([image.shape[0], image.shape[1]]) // 3
        selected_anno = [q for q in json_data if q['image_id'] == i]
        grid_config = selected_anno[0]['config']

        # Split each image into a 3x3 grid of crops; one label per cell.
        counter = 0
        for grid_row in range(0, 3):
            for grid_col in range(0, 3):
                start_row = grid_row*crop_shape[0]
                start_col = grid_col*crop_shape[1]
                cropped_image = image[start_row:start_row+crop_shape[0], start_col:start_col+crop_shape[1], :]
                if np.ndim(mean_image) == 0:
                    # A 0-d mean_image is the sentinel for "no mean subtraction".
                    atr_images[9*(i-start_index)+counter, :, :, :] = cropped_image/254.0
                else:
                    atr_images[9*(i-start_index)+counter, :, :, :] = (cropped_image/254.0) - mean_image
                # The attribute label sits one position after the object label
                # for each grid cell in grid_config.
                atr_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col+1]] = 1
                counter = counter + 1

    return (atr_images, atr_labels)
def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3):
    # Pass a 0-d array as mean_image so the loader skips mean subtraction.
    batch = atr_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels)
    mean_image = np.mean(batch[0], 0)
    return mean_image


def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3):
    max_iter = np.floor(num_images/batch_size)
    mean_image = np.zeros([img_height//3, img_width//3, channels])
    for i in range(max_iter.astype(np.int16)):
        mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels)
    mean_image = mean_image/max_iter
    return mean_image
class html_atr_table_writer():
    def __init__(self, filename):
        self.filename = filename
        self.html_file = open(self.filename, 'w')
        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")

    def add_element(self, col_dict):
        self.html_file.write(' <tr>\n')
        for key in range(len(col_dict)):
            self.html_file.write(""" <td>{}</td>\n""".format(col_dict[key]))
        self.html_file.write(' </tr>\n')

    def image_tag(self, image_path, height, width):
        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path, height, width)

    def close_file(self):
        self.html_file.write('</table>\n</body>\n</html>')
        self.html_file.close()
if __name__ == "__main__":
    html_writer = html_atr_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/trial.html')
    col_dict = {0: 'sam', 1: html_writer.image_tag('something.png', 25, 25)}
    html_writer.add_element(col_dict)
    html_writer.close_file()
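For reference, a short usage sketch of the helpers defined above, mirroring how the trainer calls them; the dataset paths and sizes below are placeholders, not the repository's actual configuration:

train_json = 'shapes_dataset/train_anno.json'   # placeholder path
image_dir = 'shapes_dataset/images'             # placeholder path

# Average the crops of the first 1000 images (in batches of 100), then load
# one mean-subtracted mini-batch of 10 images, i.e. 90 crops of 25x25x3.
mean = mean_image(train_json, image_dir, 1000, 100, img_height=75, img_width=75)
images, labels = atr_mini_batch_loader(train_json, image_dir, mean, 1, 10, img_height=75, img_width=75)
print(images.shape, labels.shape)  # (90, 25, 25, 3) (90, 4)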
File added
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from scipy import misc
import tensorflow as tf
import atr_data_io_helper as atr_data_loader
import tf_graph_creation_helper as graph_creator
import plot_helper as plotter
def eval(eval_params):
    sess = tf.InteractiveSession()
    x, y, keep_prob = graph_creator.placeholder_inputs()
    _ = graph_creator.obj_comp_graph(x, 1.0)
    obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat[0])
    accuracy = graph_creator.evaluation(y, y_pred)

    saver = tf.train.Saver()
    saver.restore(sess, eval_params['model_name'] + '-' + str(eval_params['global_step']))

    mean_image = np.load(os.path.join(eval_params['out_dir'], 'mean_image.npy'))
    test_json_filename = eval_params['test_json']
    image_dir = eval_params['image_dir']
    html_dir = eval_params['html_dir']
    if not os.path.exists(html_dir):
        os.mkdir(html_dir)

    html_writer = atr_data_loader.html_atr_table_writer(os.path.join(html_dir, 'index.html'))
    col_dict = {
        0: 'Ground Truth',
        1: 'Prediction',
        2: 'Image'}
    html_writer.add_element(col_dict)

    color_dict = {
        0: 'red',  # blanks are treated as red
        1: 'green',
        2: 'blue'}

    batch_size = 100
    correct = 0
    for i in range(50):
        test_batch = atr_data_loader.atr_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
        feed_dict_test = {x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
        result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
        correct = correct + result[0]*batch_size
        print(correct)

        # Write each misclassified crop into the HTML error report.
        for row in range(batch_size*9):
            gt_id = np.argmax(test_batch[1][row, :])
            pred_id = np.argmax(result[1][row, :])
            if not gt_id == pred_id:
                img_filename = os.path.join(html_dir, '{}_{}.png'.format(i, row))
                misc.imsave(img_filename, test_batch[0][row, :, :, :] + mean_image)
                col_dict = {
                    0: color_dict[gt_id],
                    1: color_dict[pred_id],
                    2: html_writer.image_tag('{}_{}.png'.format(i, row), 25, 25)}
                html_writer.add_element(col_dict)

    html_writer.close_file()
    print('Test Accuracy: {}'.format(correct / 5000))

    sess.close()
    tf.reset_default_graph()
File added
@@ -5,36 +5,33 @@ import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
import atr_data_io_helper as atr_data_loader
import tf_graph_creation_helper as graph_creator
import plot_helper as plotter
def train():
# Start session
def train(train_params):
sess = tf.InteractiveSession()
x, y, keep_prob = placeholder_inputs()
y_pred = comp_graph_v_2(x, y, keep_prob)
# Specify loss
cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
# Specify training method
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# Evaluator
accuracy = evaluation(y, y_pred)
x, y, keep_prob = graph_creator.placeholder_inputs()
_ = graph_creator.obj_comp_graph(x, 1.0)
obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat[0])
cross_entropy = graph_creator.loss(y, y_pred)
vars_to_opt = tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
print('Variables that are being optimized: ' + ' '.join([var.name for var in vars_to_opt]))
print('All variables: ' + ' '.join([var.name for var in all_vars]))
train_step = tf.train.AdamOptimizer(train_params['adam_lr']).minimize(cross_entropy, var_list=vars_to_opt)
accuracy = graph_creator.evaluation(y, y_pred)
# Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir
merged = tf.merge_all_summaries()
# Output dir
outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/'
outdir = train_params['out_dir']
if not os.path.exists(outdir):
os.mkdir(outdir)
# Training Data
img_width = 75
img_height = 75
train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
train_json_filename = train_params['train_json']
image_dir = train_params['image_dir']
mean_image = atr_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
@@ -44,12 +41,17 @@ def train():
# Session Saver
saver = tf.train.Saver()
# Start Training
sess.run(tf.initialize_all_variables())
batch_size = 100
max_epoch = 10
max_iter = 95
# Restore obj network parameters
saver.restore(sess, train_params['obj_model_name'] + '-' + str(train_params['obj_global_step']))
batch_size = 10
max_epoch = 2
max_iter = 950
val_acc_array_iter = np.empty([max_iter*max_epoch])
val_acc_array_epoch = np.zeros([max_epoch])
train_acc_array_epoch = np.zeros([max_epoch])
@@ -57,24 +59,20 @@ def train():
for i in range(max_iter):
train_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
_, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
plotter.plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
plotter.plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
save_path = saver.save(sess, os.path.join(outdir,'atr_classifier'), global_step=epoch)
sess.close()
tf.reset_default_graph()
if __name__=='__main__':
train()
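One detail worth noting from the diff above: the trainer now saves with saver.save(sess, os.path.join(outdir, 'atr_classifier'), global_step=epoch), and TensorFlow's Saver appends '-<global_step>' to the prefix, which is exactly the string eval_atr_classifier.py reconstructs when restoring. A small self-contained sketch of that convention, using the TF 0.x API as in these files; the /tmp directory and dummy variable are illustrative only:

import os
import tensorflow as tf

out_dir = '/tmp/atr_ckpt_demo'  # illustrative scratch directory
if not os.path.exists(out_dir):
    os.mkdir(out_dir)

with tf.Session() as sess:
    v = tf.Variable(0.0, name='dummy')  # stand-in for the real model variables
    sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    # Saving with global_step=1 writes '<out_dir>/atr_classifier-1'.
    save_path = saver.save(sess, os.path.join(out_dir, 'atr_classifier'), global_step=1)
    print(save_path)
    # Restoring takes the same '<prefix>-<step>' string back.
    saver.restore(sess, save_path)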
File added
@@ -5,7 +5,7 @@ import matplotlib.image as mpimg
import numpy as np
from scipy import misc
import tensorflow as tf
import object_classifiers.obj_data_io_helper as shape_data_loader
import obj_data_io_helper as shape_data_loader
import tf_graph_creation_helper as graph_creator
import plot_helper as plotter
@@ -25,32 +25,42 @@ def eval(eval_params):
if not os.path.exists(html_dir):
os.mkdir(html_dir)
html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir, 'index.html'))
col_dict = {0: 'Ground Truth',
1: 'Prediction',
2: 'Image'}
col_dict = {
0: 'Ground Truth',
1: 'Prediction',
2: 'Image'}
html_writer.add_element(col_dict)
shape_dict = {0: 'blank',
1: 'rectangle',
2: 'triangle',
3: 'circle'}
shape_dict = {
0: 'blank',
1: 'rectangle',
2: 'triangle',
3: 'circle'}
batch_size = 100
correct = 0
for i in range(1):
for i in range(50):
test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000 + i * batch_size, batch_size, 75, 75)
#print(test_batch[0].dtype)
# print([np.amax(test_batch[0][0,:, :, :]), np.amax(mean_image)])
feed_dict_test = {x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
correct = correct + result[0] * batch_size
print(correct)
for row in range(batch_size * 9):
gt_id = np.argmax(test_batch[1][row, :])
pred_id = np.argmax(result[1][row, :])
if not gt_id == pred_id:
img_filename = os.path.join(html_dir, '{}_{}.png'.format(i, row))
misc.imsave(img_filename, test_batch[0][row, :, :, :])
col_dict = {0: shape_dict[gt_id],
1: shape_dict[pred_id],
2: html_writer.image_tag('{}_{}.png'.format(i, row), 25, 25)}
misc.imsave(img_filename, test_batch[0][row, :, :, :] + mean_image)
col_dict = {
0: shape_dict[gt_id],
1: shape_dict[pred_id],
2: html_writer.image_tag('{}_{}.png'.format(i, row), 25, 25)}
html_writer.add_element(col_dict)
html_writer.close_file()
print('Test Accuracy: {}'.format(correct / 5000))
sess.close()
tf.reset_default_graph()
No preview for this file type
#Embedded file name: /home/tanmay/Code/GenVQA/GenVQA/classifiers/object_classifiers/obj_data_io_helper.py
import json
import sys
import os
@@ -9,19 +8,20 @@ import tensorflow as tf
from scipy import misc
def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height = 100, img_width = 100, channels = 3):
with open(json_filename, 'r') as json_file:
json_data = json.load(json_file)
obj_images = np.empty(shape=[9 * batch_size,
img_height / 3,
img_width / 3,
channels])
obj_images = np.empty(shape=[9 * batch_size, img_height / 3, img_width / 3, channels])
obj_labels = np.zeros(shape=[9 * batch_size, 4])
for i in range(start_index, start_index + batch_size):
image_name = os.path.join(image_dir, str(i) + '.jpg')
image = misc.imresize(mpimg.imread(image_name), (img_height, img_width), interp='nearest')
crop_shape = np.array([image.shape[0], image.shape[1]]) / 3
selected_anno = [ q for q in json_data if q['image_id'] == i ]
grid_config = selected_anno[0]['config']
counter = 0
for grid_row in range(0, 3):
for grid_col in range(0, 3):
@@ -31,7 +31,7 @@ def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, bat
if np.ndim(mean_image) == 0:
obj_images[9 * (i - start_index) + counter, :, :, :] = cropped_image / 254.0
else:
obj_images[9 * (i - start_index) + counter, :, :, :] = (cropped_image - mean_image) / 254
obj_images[9 * (i - start_index) + counter, :, :, :] = (cropped_image / 254.0) - mean_image
obj_labels[9 * (i - start_index) + counter, grid_config[6 * grid_row + 2 * grid_col]] = 1
counter = counter + 1
@@ -51,7 +51,6 @@ def mean_image(json_filename, image_dir, num_images, batch_size, img_height = 10
mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1 + i * batch_size, batch_size, img_height, img_width, channels)
mean_image = mean_image / max_iter
tmp_mean_image = mean_image * 254
return mean_image
No preview for this file type
@@ -21,21 +21,26 @@ def train(train_params):
if not os.path.exists(outdir):
os.mkdir(outdir)
# Training Data
img_width = 75
img_height = 75
train_json_filename = train_params['train_json']
image_dir = train_params['image_dir']
mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
# Val Data
val_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
feed_dict_val = {x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
# Session Saver
saver = tf.train.Saver()
# Start Training
sess.run(tf.initialize_all_variables())
batch_size = 10
max_epoch = 2
max_iter = 1
max_iter = 950
val_acc_array_iter = np.empty([max_iter * max_epoch])
val_acc_array_epoch = np.zeros([max_epoch])
train_acc_array_epoch = np.zeros([max_epoch])
No preview for this file type
@@ -6,35 +6,59 @@ import numpy as np
import tensorflow as tf
import object_classifiers.train_obj_classifier as obj_trainer
import object_classifiers.eval_obj_classifier as obj_evaluator
import attribute_classifiers.train_atr_classifier as atr_trainer
import attribute_classifiers.eval_atr_classifier as atr_evaluator
workflow = {
'train_obj': False,
'eval_obj': False,
'train_atr': True,
'eval_atr': True,
}
train_params = {
obj_classifier_train_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier',
'adam_lr': 0.001,
'train_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
}
eval_params = {
obj_classifier_eval_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier',
'model_name': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/obj_classifier',
'global_step': 1,
'test_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
'html_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/html_dir',
'create_graph': False,
}
atr_classifier_train_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier',
'adam_lr': 0.001,
'train_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
'obj_model_name': obj_classifier_eval_params['model_name'],
'obj_global_step': 1,
}
atr_classifier_eval_params = {
'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier',
'model_name': '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier/atr_classifier',
'global_step': 1,
'test_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json',
'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
'html_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier/html_dir',
}
if __name__=='__main__':
if workflow['train_obj']:
obj_trainer.train(train_params)
obj_trainer.train(obj_classifier_train_params)
obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
print(obj_feat)
if workflow['eval_obj']:
obj_evaluator.eval(eval_params)
obj_evaluator.eval(obj_classifier_eval_params)
if workflow['train_atr']:
atr_trainer.train(atr_classifier_train_params)
if workflow['eval_atr']:
atr_evaluator.eval(atr_classifier_eval_params)