From 780a9e7b7d4c923e97b2937e368dc05a3daa0c7a Mon Sep 17 00:00:00 2001
From: tgupta6 <tgupta6@illinois.edu>
Date: Thu, 7 Apr 2016 12:31:25 -0500
Subject: [PATCH] Deleted redundant files

---
 .../#eval_atr_classifier.py#                |  90 --------
 .../.#eval_atr_classifier.py                |   1 -
 .../eval_atr_classifier.py                  |   2 +-
 color_classifiers/__init__.py               |   0
 color_classifiers/atr_data_io_helper.py     |  90 --------
 color_classifiers/atr_data_io_helper.pyc    | Bin 4538 -> 0 bytes
 color_classifiers/eval_atr_classifier.py    |  69 ------
 color_classifiers/train_atr_classifier.py   | 196 -----------------
 color_classifiers/train_atr_classifier.pyc  | Bin 7635 -> 0 bytes
 object_classifiers/#eval_obj_classifier.py# |  45 ----
 object_classifiers/__init__.py              |   0
 object_classifiers/eval_obj_classifier.py   |  69 ------
 object_classifiers/obj_data_io_helper.py    |  90 --------
 object_classifiers/obj_data_io_helper.pyc   | Bin 4545 -> 0 bytes
 object_classifiers/obj_data_io_helper.py~   |  48 -----
 object_classifiers/train_obj_classifier.py  | 197 ------------------
 object_classifiers/train_obj_classifier.pyc | Bin 7697 -> 0 bytes
 17 files changed, 1 insertion(+), 896 deletions(-)
 delete mode 100644 classifiers/attribute_classifiers/#eval_atr_classifier.py#
 delete mode 120000 classifiers/attribute_classifiers/.#eval_atr_classifier.py
 delete mode 100644 color_classifiers/__init__.py
 delete mode 100644 color_classifiers/atr_data_io_helper.py
 delete mode 100644 color_classifiers/atr_data_io_helper.pyc
 delete mode 100644 color_classifiers/eval_atr_classifier.py
 delete mode 100644 color_classifiers/train_atr_classifier.py
 delete mode 100644 color_classifiers/train_atr_classifier.pyc
 delete mode 100644 object_classifiers/#eval_obj_classifier.py#
 delete mode 100644 object_classifiers/__init__.py
 delete mode 100644 object_classifiers/eval_obj_classifier.py
 delete mode 100644 object_classifiers/obj_data_io_helper.py
 delete mode 100644 object_classifiers/obj_data_io_helper.pyc
 delete mode 100644 object_classifiers/obj_data_io_helper.py~
 delete mode 100644 object_classifiers/train_obj_classifier.py
 delete mode 100644 object_classifiers/train_obj_classifier.pyc

diff --git a/classifiers/attribute_classifiers/#eval_atr_classifier.py# b/classifiers/attribute_classifiers/#eval_atr_classifier.py#
deleted file mode 100644
index 00debb0..0000000
--- a/classifiers/attribute_classifiers/#eval_atr_classifier.py#
+++ /dev/null
@@ -1,90 +0,0 @@
-import sys
-import json
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import atr_data_io_helper as atr_data_loader
-import tf_graph_creation_helper as graph_creator
-import plot_helper as plotter
-
-def eval(eval_params):
-    sess=tf.InteractiveSession()
-
-    x, y, keep_prob = graph_creator.placeholder_inputs()
-    _ = graph_creator.obj_comp_graph(x, 1.0)
-    g = tf.get_default_graph()
-    obj_feat = g.get_operation_by_name('obj/conv2/obj_feat')
-    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat.outputs[0])
-    accuracy = graph_creator.evaluation(y, y_pred)
-
-    # Object model restorer
-    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \
-        tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
-    print('Variables to restore:')
-    print([var.name for var in vars_to_restore])
-
-    saver = tf.train.Saver(vars_to_restore)
-    saver.restore(sess, eval_params['model_name'] + '-' + \
-        str(eval_params['global_step']))
-
-    mean_image = np.load(os.path.join(eval_params['out_dir'], 'mean_image.npy'))
-    test_json_filename = eval_params['test_json']
-    with open(test_json_filename, 'r') as json_file:
-        raw_json_data = json.load(json_file)
-        test_json_data = dict()
-        for entry in raw_json_data:
-            if entry['image_id'] not in test_json_data:
-                test_json_data[entry['image_id']]=entry['config']
-
-    image_dir = eval_params['image_dir']
-    html_dir = eval_params['html_dir']
-    if not os.path.exists(html_dir):
-        os.mkdir(html_dir)
-
-    html_writer = atr_data_loader \
-        .html_atr_table_writer(os.path.join(html_dir, 'index.html'))
-
-    col_dict = {
-        0: 'Grount Truth',
-        1: 'Prediction',
-        2: 'Image'}
-
-    html_writer.add_element(col_dict)
-
-    color_dict = {
-        0: 'red',
-        1: 'green',
-        2: 'blue',
-        3: 'blank'}
-
-    batch_size = 100
-    correct = 0
-    for i in range(50):
-        test_batch = atr_data_loader\
-            .atr_mini_batch_loader(test_json_data, image_dir, mean_image,
-                                   10000+i*batch_size, batch_size, 75, 75)
-        feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
-        result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
-        correct = correct + result[0]*batch_size
-        print(correct)
-
-        for row in range(batch_size*9):
-            gt_id = np.argmax(test_batch[1][row,:])
-            pred_id = np.argmax(result[1][row, :])
-            if not gt_id==pred_id:
-                img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row))
-                misc.imsave(img_filename, test_batch[0][row,:,:,:] + mean_image)
-                col_dict = {
-                    0: color_dict[gt_id],
-                    1: color_dict[pred_id],
-                    2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)}
-                html_writer.add_element(col_dict)
-
-    html_writer.close_file()
-    print('Test Accuracy: {}'.format(correct / 5000))
-
-    sess.close()
-    tf.reset_default_graph()
diff --git a/classifiers/attribute_classifiers/.#eval_atr_classifier.py b/classifiers/attribute_classifiers/.#eval_atr_classifier.py
deleted file mode 120000
index a7436f7..0000000
--- a/classifiers/attribute_classifiers/.#eval_atr_classifier.py
+++ /dev/null
@@ -1 +0,0 @@
-tanmay@crunchy.29101:1450461082
\ No newline at end of file
diff --git a/classifiers/attribute_classifiers/eval_atr_classifier.py b/classifiers/attribute_classifiers/eval_atr_classifier.py
index da1b343..00debb0 100644
--- a/classifiers/attribute_classifiers/eval_atr_classifier.py
+++ b/classifiers/attribute_classifiers/eval_atr_classifier.py
@@ -55,7 +55,7 @@ def eval(eval_params):
     html_writer.add_element(col_dict)
 
     color_dict = {
-        0: 'red', # blanks are treated as red
+        0: 'red',
         1: 'green',
         2: 'blue',
         3: 'blank'}
diff --git a/color_classifiers/__init__.py b/color_classifiers/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/color_classifiers/atr_data_io_helper.py b/color_classifiers/atr_data_io_helper.py
deleted file mode 100644
index a68f75e..0000000
--- a/color_classifiers/atr_data_io_helper.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import json
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-import tensorflow as tf
-from scipy import misc
-
-def atr_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3):
-
-    with open(json_filename, 'r') as json_file:
-        json_data = json.load(json_file)
-
-    atr_images = np.empty(shape=[9*batch_size, img_height/3, img_width/3, channels])
-    atr_labels = np.zeros(shape=[9*batch_size, 4])
-
-    for i in range(start_index, start_index + batch_size):
-        image_name = os.path.join(image_dir, str(i) + '.jpg')
-        image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest')
-# image.resize((img_height, img_width, 3))
-        crop_shape = np.array([image.shape[0], image.shape[1]])/3
-        selected_anno = [q for q in json_data if q['image_id']==i]
-        grid_config = selected_anno[0]['config']
-
-        counter = 0;
-        for grid_row in range(0,3):
-            for grid_col in range(0,3):
-                start_row = grid_row*crop_shape[0]
-                start_col = grid_col*crop_shape[1]
-# print([start_row, start_col])
-                cropped_image = image[start_row:start_row+crop_shape[0], start_col:start_col+crop_shape[1], :]
-                if np.ndim(mean_image)==0:
-                    atr_images[9*(i-start_index)+counter,:,:,:] = cropped_image/254.
-                else:
-                    atr_images[9*(i-start_index)+counter,:,:,:] = (cropped_image-mean_image)/254
-                atr_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col+1]] = 1
-                counter = counter + 1
-
-    # imgplot = plt.imshow(obj_images[0,:,:,:].astype(np.uint8))
-    # plt.show()
-    return (atr_images, atr_labels)
-
-def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3):
-    batch = atr_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels)
-    mean_image = np.mean(batch[0], 0)
-    return mean_image
-
-def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3):
-    max_iter = np.floor(num_images/batch_size)
-    mean_image = np.zeros([img_height/3, img_width/3, channels])
-    for i in range(max_iter.astype(np.int16)):
-        mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels)
-
-    mean_image = mean_image/max_iter
-    tmp_mean_image = mean_image*254
-    # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8))
-    # plt.show()
-    return mean_image
-
-
-class html_atr_table_writer():
-    def __init__(self, filename):
-        self.filename = filename
-        self.html_file = open(self.filename, 'w')
-        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")
-
-    def add_element(self, col_dict):
-        self.html_file.write(' <tr>\n')
-        for key in range(len(col_dict)):
-            self.html_file.write(""" <td>{}</td>\n""".format(col_dict[key]))
-        self.html_file.write(' </tr>\n')
-
-    def image_tag(self, image_path, height, width):
-        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width)
-
-    def close_file(self):
-        self.html_file.write('</table>\n</body>\n</html>')
-        self.html_file.close()
-
-
-
-
-if __name__=="__main__":
-
-    html_writer = html_atr_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/trial.html')
-    col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)}
-    html_writer.add_element(col_dict)
-    html_writer.close_file()
-
diff --git a/color_classifiers/atr_data_io_helper.pyc b/color_classifiers/atr_data_io_helper.pyc
deleted file mode 100644
index f1e7f0bf9e4b06fc1016b65356fdae76ff343f8e..0000000000000000000000000000000000000000
Binary files a/color_classifiers/atr_data_io_helper.pyc and /dev/null differ
diff --git a/color_classifiers/eval_atr_classifier.py b/color_classifiers/eval_atr_classifier.py
deleted file mode 100644
index c6a14dd..0000000
--- a/color_classifiers/eval_atr_classifier.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import atr_data_io_helper as atr_data_loader
-from train_atr_classifier import placeholder_inputs, comp_graph_v_2, evaluation
-
-sess=tf.InteractiveSession()
-
-x, y, keep_prob = placeholder_inputs()
-y_pred = comp_graph_v_2(x, y, keep_prob)
-
-accuracy = evaluation(y, y_pred)
-
-saver = tf.train.Saver()
-
-saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/obj_classifier_1.ckpt')
-
-mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/mean_image.npy')
-
-# Test Data
-test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
-image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-
-# Base dir for html visualizer
-html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/html'
-if not os.path.exists(html_dir):
-    os.mkdir(html_dir)
-
-# HTML file writer
-html_writer = atr_data_loader.html_atr_table_writer(os.path.join(html_dir,'index.html'))
-col_dict={
-    0: 'Grount Truth',
-    1: 'Prediction',
-    2: 'Image'}
-html_writer.add_element(col_dict)
-
-shape_dict = {
-    0: 'blank',
-    1: 'rectangle',
-    2: 'triangle',
-    3: 'circle'}
-
-batch_size = 100
-correct = 0
-for i in range(50):
-    test_batch = atr_data_loader.atr_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
-    feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
-    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
-    correct = correct + result[0]*batch_size
-    print(correct)
-
-    for row in range(batch_size*9):
-        gt_id = np.argmax(test_batch[1][row,:])
-        pred_id = np.argmax(result[1][row, :])
-        if not gt_id==pred_id:
-            img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row))
-            misc.imsave(img_filename, test_batch[0][row,:,:,:])
-            col_dict = {
-                0: shape_dict[gt_id],
-                1: shape_dict[pred_id],
-                2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)}
-            html_writer.add_element(col_dict)
-
-html_writer.close_file()
-print('Test Accuracy: {}'.format(correct/5000))
diff --git a/color_classifiers/train_atr_classifier.py b/color_classifiers/train_atr_classifier.py
deleted file mode 100644
index 1bc622a..0000000
--- a/color_classifiers/train_atr_classifier.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-import tensorflow as tf
-import atr_data_io_helper as atr_data_loader
-
-def plot_accuracy(xdata, ydata, xlim=None, ylim=None, savePath=None):
-    fig, ax = plt.subplots( nrows=1, ncols=1 )
-    ax.plot(xdata, ydata)
-    plt.xlabel('Iterations')
-    plt.ylabel('Accuracy')
-
-    if not xlim==None:
-        plt.xlim(xlim)
-
-    if not ylim==None:
-        plt.ylim(ylim)
-
-    if not savePath==None:
-        fig.savefig(savePath)
-
-
-    plt.close(fig)
-
-def plot_accuracies(xdata, ydata_train, ydata_val, xlim=None, ylim=None, savePath=None):
-    fig, ax = plt.subplots( nrows=1, ncols=1 )
-    ax.plot(xdata, ydata_train, xdata, ydata_val)
-    plt.xlabel('Epochs')
-    plt.ylabel('Accuracy')
-    plt.legend(['Train', 'Val'], loc='lower right')
-    if not xlim==None:
-        plt.xlim(xlim)
-
-    if not ylim==None:
-        plt.ylim(ylim)
-
-    if not savePath==None:
-        fig.savefig(savePath)
-
-
-    plt.close(fig)
-
-
-def weight_variable(shape):
-    initial = tf.truncated_normal(shape, stddev=0.1)
-    return tf.Variable(initial)
-
-def bias_variable(shape):
-    initial = tf.constant(0.1, shape=shape)
-    return tf.Variable(initial)
-
-def conv2d(x, W):
-    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
-
-def max_pool_2x2(x):
-    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
-
-def max_pool_4x4(x):
-    return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
-
-def placeholder_inputs():
-    # Specify placeholder_inputs
-    x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3])
-    y = tf.placeholder(tf.float32, shape=[None, 4])
-    keep_prob = tf.placeholder(tf.float32)
-    return x, y, keep_prob
-
-def comp_graph_v_1(x, y, keep_prob):
-    # Specify computation graph
-    W_conv1 = weight_variable([5, 5, 3, 10])
-    b_conv1 = bias_variable([10])
-
-    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
-
-    h_pool1 = max_pool_2x2(h_conv1)
-    #print(tf.Tensor.get_shape(h_pool1))
-
-    W_fc1 = weight_variable([13*13*10, 4])
-    b_fc1 = bias_variable([4])
-
-    h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10])
-    h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob)
-
-    y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1)
-
-    return y_pred
-
-def comp_graph_v_2(x, y, keep_prob):
-    # Specify computation graph
-    W_conv1 = weight_variable([5, 5, 3, 4])
-    b_conv1 = bias_variable([4])
-
-    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
-    h_pool1 = max_pool_2x2(h_conv1)
-    h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob)
-
-    W_conv2 = weight_variable([3, 3, 4, 8])
-    b_conv2 = bias_variable([8])
-
-    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
-    h_pool2 = max_pool_2x2(h_conv2)
-
-    W_fc1 = weight_variable([7*7*8, 4])
-    b_fc1 = bias_variable([4])
-
-    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*8])
-    h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob)
-
-    y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat_drop,W_fc1) + b_fc1)
-
-    return y_pred
-
-def evaluation(y, y_pred):
-    # Evaluation function
-    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_pred,1))
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-    #tf.scalar_summary("accuracy", accuracy)
-
-    return accuracy
-
-
-def train():
-    # Start session
-    sess = tf.InteractiveSession()
-
-    x, y, keep_prob = placeholder_inputs()
-    y_pred = comp_graph_v_2(x, y, keep_prob)
-
-    # Specify loss
-    cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
-
-    # Specify training method
-    train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
-
-    # Evaluator
-    accuracy = evaluation(y, y_pred)
-
-    # Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir
-    merged = tf.merge_all_summaries()
-
-    # Output dir
-    outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Atr_Classifier_v_1/'
-    if not os.path.exists(outdir):
-        os.mkdir(outdir)
-
-    # Training Data
-    img_width = 75
-    img_height = 75
-    train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
-    image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-    mean_image = atr_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
-    np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
-
-    # Val Data
-    val_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
-    feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
-
-    # Session Saver
-    saver = tf.train.Saver()
-
-    # Start Training
-    sess.run(tf.initialize_all_variables())
-    batch_size = 100
-    max_epoch = 10
-    max_iter = 95
-    val_acc_array_iter = np.empty([max_iter*max_epoch])
-    val_acc_array_epoch = np.zeros([max_epoch])
-    train_acc_array_epoch = np.zeros([max_epoch])
-    for epoch in range(max_epoch):
-        for i in range(max_iter):
-            train_batch = atr_data_loader.atr_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
-            feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
-
-            _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
-
-            train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
-            val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
-            print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
-            plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
-
-
-        train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
-        val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
-
-        plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
-
-        save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
-
-
-    sess.close()
-
-if __name__=='__main__':
-    train()
-
diff --git a/color_classifiers/train_atr_classifier.pyc b/color_classifiers/train_atr_classifier.pyc
deleted file mode 100644
index 513dd9fab63523be1e847885b6c07fe3093f399c..0000000000000000000000000000000000000000
Binary files a/color_classifiers/train_atr_classifier.pyc and /dev/null differ
diff --git a/object_classifiers/#eval_obj_classifier.py# b/object_classifiers/#eval_obj_classifier.py#
deleted file mode 100644
index 28fa50e..0000000
--- a/object_classifiers/#eval_obj_classifier.py#
+++ /dev/null
@@ -1,45 +0,0 @@
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-import tensorflow as tf
-import obj_data_io_helper as shape_data_loader
-from train_obj_classifier import placeholder_inputs, comp_graph_v_1, evaluation
-
-sess=tf.InteractiveSession()
-
-x, y, keep_prob = placeholder_inputs()
-y_pred = comp_graph_v_1(x, y, keep_prob)
-
-accuracy = evaluation(y, y_pred)
-
-saver = tf.train.Saver()
-
-saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/obj_classifier_9.ckpt')
-
-mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/mean_image.npy')
-
-# Test Data
-test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
-image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-
-# HTML file writer
-html_writer = shape_data_loader.html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
-batch_size = 100
-correct = 0
-for i in range(1): #50
-    test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
-    feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
-    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
-    correct = correct + result[0]*batch_size
-    print(correct)
-
-    for row in range(batch_size):
-        col_dict = {
-            0: test_batch[1][row,:],
-            1: y_pred[row, :]}
-        html_writer.add_element(col_dict)
-
-html_writer.close_file()
-print('Test Accuracy: {}'.format(correct/5000))
diff --git a/object_classifiers/__init__.py b/object_classifiers/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/object_classifiers/eval_obj_classifier.py b/object_classifiers/eval_obj_classifier.py
deleted file mode 100644
index 9853116..0000000
--- a/object_classifiers/eval_obj_classifier.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import obj_data_io_helper as shape_data_loader
-from train_obj_classifier import placeholder_inputs, comp_graph_v_1, comp_graph_v_2, evaluation
-
-sess=tf.InteractiveSession()
-
-x, y, keep_prob = placeholder_inputs()
-y_pred = comp_graph_v_2(x, y, keep_prob)
-
-accuracy = evaluation(y, y_pred)
-
-saver = tf.train.Saver()
-
-saver.restore(sess, '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/obj_classifier_1.ckpt')
-
-mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/mean_image.npy')
-
-# Test Data
-test_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/test_anno.json'
-image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-
-# Base dir for html visualizer
-html_dir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/html'
-if not os.path.exists(html_dir):
-    os.mkdir(html_dir)
-
-# HTML file writer
-html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir,'index.html'))
-col_dict={
-    0: 'Grount Truth',
-    1: 'Prediction',
-    2: 'Image'}
-html_writer.add_element(col_dict)
-
-shape_dict = {
-    0: 'blank',
-    1: 'rectangle',
-    2: 'triangle',
-    3: 'circle'}
-
-batch_size = 100
-correct = 0
-for i in range(50):
-    test_batch = shape_data_loader.obj_mini_batch_loader(test_json_filename, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
-    feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
-    result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
-    correct = correct + result[0]*batch_size
-    print(correct)
-
-    for row in range(batch_size*9):
-        gt_id = np.argmax(test_batch[1][row,:])
-        pred_id = np.argmax(result[1][row, :])
-        if not gt_id==pred_id:
-            img_filename = os.path.join(html_dir,'{}_{}.png'.format(i,row))
-            misc.imsave(img_filename, test_batch[0][row,:,:,:])
-            col_dict = {
-                0: shape_dict[gt_id],
-                1: shape_dict[pred_id],
-                2: html_writer.image_tag('{}_{}.png'.format(i,row), 25, 25)}
-            html_writer.add_element(col_dict)
-
-html_writer.close_file()
-print('Test Accuracy: {}'.format(correct/5000))
diff --git a/object_classifiers/obj_data_io_helper.py b/object_classifiers/obj_data_io_helper.py
deleted file mode 100644
index d0427ab..0000000
--- a/object_classifiers/obj_data_io_helper.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import json
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-import tensorflow as tf
-from scipy import misc
-
-def obj_mini_batch_loader(json_filename, image_dir, mean_image, start_index, batch_size, img_height=100, img_width=100, channels=3):
-
-    with open(json_filename, 'r') as json_file:
-        json_data = json.load(json_file)
-
-    obj_images = np.empty(shape=[9*batch_size, img_height/3, img_width/3, channels])
-    obj_labels = np.zeros(shape=[9*batch_size, 4])
-
-    for i in range(start_index, start_index + batch_size):
-        image_name = os.path.join(image_dir, str(i) + '.jpg')
-        image = misc.imresize(mpimg.imread(image_name),(img_height, img_width), interp='nearest')
-# image.resize((img_height, img_width, 3))
-        crop_shape = np.array([image.shape[0], image.shape[1]])/3
-        selected_anno = [q for q in json_data if q['image_id']==i]
-        grid_config = selected_anno[0]['config']
-
-        counter = 0;
-        for grid_row in range(0,3):
-            for grid_col in range(0,3):
-                start_row = grid_row*crop_shape[0]
-                start_col = grid_col*crop_shape[1]
-# print([start_row, start_col])
-                cropped_image = image[start_row:start_row+crop_shape[0], start_col:start_col+crop_shape[1], :]
-                if np.ndim(mean_image)==0:
-                    obj_images[9*(i-start_index)+counter,:,:,:] = cropped_image/254.
-                else:
-                    obj_images[9*(i-start_index)+counter,:,:,:] = (cropped_image-mean_image)/254
-                obj_labels[9*(i-start_index)+counter, grid_config[6*grid_row+2*grid_col]] = 1
-                counter = counter + 1
-
-    # imgplot = plt.imshow(obj_images[0,:,:,:].astype(np.uint8))
-    # plt.show()
-    return (obj_images, obj_labels)
-
-def mean_image_batch(json_filename, image_dir, start_index, batch_size, img_height=100, img_width=100, channels=3):
-    batch = obj_mini_batch_loader(json_filename, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels)
-    mean_image = np.mean(batch[0], 0)
-    return mean_image
-
-def mean_image(json_filename, image_dir, num_images, batch_size, img_height=100, img_width=100, channels=3):
-    max_iter = np.floor(num_images/batch_size)
-    mean_image = np.zeros([img_height/3, img_width/3, channels])
-    for i in range(max_iter.astype(np.int16)):
-        mean_image = mean_image + mean_image_batch(json_filename, image_dir, 1+i*batch_size, batch_size, img_height, img_width, channels)
-
-    mean_image = mean_image/max_iter
-    tmp_mean_image = mean_image*254
-    # imgplot = plt.imshow(tmp_mean_image.astype(np.uint8))
-    # plt.show()
-    return mean_image
-
-
-class html_obj_table_writer():
-    def __init__(self, filename):
-        self.filename = filename
-        self.html_file = open(self.filename, 'w')
-        self.html_file.write("""<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n""")
-
-    def add_element(self, col_dict):
-        self.html_file.write(' <tr>\n')
-        for key in range(len(col_dict)):
-            self.html_file.write(""" <td>{}</td>\n""".format(col_dict[key]))
-        self.html_file.write(' </tr>\n')
-
-    def image_tag(self, image_path, height, width):
-        return """<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>""".format(image_path,height,width)
-
-    def close_file(self):
-        self.html_file.write('</table>\n</body>\n</html>')
-        self.html_file.close()
-
-
-
-
-if __name__=="__main__":
-
-    html_writer = html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
-    col_dict={0: 'sam', 1: html_writer.image_tag('something.png',25,25)}
-    html_writer.add_element(col_dict)
-    html_writer.close_file()
-
diff --git a/object_classifiers/obj_data_io_helper.pyc b/object_classifiers/obj_data_io_helper.pyc
deleted file mode 100644
index 48169a55a825c93a57adb1cf4cc9cfeb258f4b15..0000000000000000000000000000000000000000
Binary files a/object_classifiers/obj_data_io_helper.pyc and /dev/null differ
diff --git a/object_classifiers/obj_data_io_helper.py~ b/object_classifiers/obj_data_io_helper.py~
deleted file mode 100644
index c550af7..0000000
--- a/object_classifiers/obj_data_io_helper.py~
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-
-
-def obj_mini_batch_loader(json_filename, batch_size, start_index):
-    with open(json_filename, 'r') as json_file:
-        json_data = json.load(json_file)
-
-    obj_images = []
-    obj_labels = []
-
-    for i in range(start_index, start_index + batch_size):
-        image_name = os.path.join(image_dir, str(i+1) + '.jpg')
-        image = mpimg.imread(image_name)
-        crop_shape = [image.shape[0]/3, image.shape[1]/3]
-        selected_anno = [q for q in json_data if q['image_id']==1]
-        grid_config = selected_anno[0]['config']
-
-        counter = 0;
-        for grid_row in range(0,3):
-            for grid_col in range(0,3):
-                start_row = grid_row*crop_shape[0]
-                start_col = grid_col*crop_shape[1]
-                print([start_row, start_col])
-                obj_images.append(image[start_row:start_row+crop_shape[0]-1, start_col:start_col+crop_shape[1]-1, :])
-                obj_labels.append(grid_config[6*grid_row+2*grid_col])
-                # imgplot = plt.imshow(obj_images[counter])
-                # plt.show()
-                counter = counter + 1
-
-
-
-
-
-if __name__=="__main__":
-
-    json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
-    batch_size = 1
-    start_index = 0
-
-    image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-
-    obj_mini_batch_loader(json_filename, batch_size, start_index)
-
diff --git a/object_classifiers/train_obj_classifier.py b/object_classifiers/train_obj_classifier.py
deleted file mode 100644
index 19b48a2..0000000
--- a/object_classifiers/train_obj_classifier.py
+++ /dev/null
@@ -1,197 +0,0 @@
-import sys
-import os
-import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
-import numpy as np
-import tensorflow as tf
-import obj_data_io_helper as shape_data_loader
-
-def plot_accuracy(xdata, ydata, xlim=None, ylim=None, savePath=None):
-    fig, ax = plt.subplots( nrows=1, ncols=1 )
-    ax.plot(xdata, ydata)
-    plt.xlabel('Iterations')
-    plt.ylabel('Accuracy')
-
-    if not xlim==None:
-        plt.xlim(xlim)
-
-    if not ylim==None:
-        plt.ylim(ylim)
-
-    if not savePath==None:
-        fig.savefig(savePath)
-
-
-    plt.close(fig)
-
-def plot_accuracies(xdata, ydata_train, ydata_val, xlim=None, ylim=None, savePath=None):
-    fig, ax = plt.subplots( nrows=1, ncols=1 )
-    ax.plot(xdata, ydata_train, xdata, ydata_val)
-    plt.xlabel('Epochs')
-    plt.ylabel('Accuracy')
-    plt.legend(['Train', 'Val'], loc='lower right')
-    if not xlim==None:
-        plt.xlim(xlim)
-
-    if not ylim==None:
-        plt.ylim(ylim)
-
-    if not savePath==None:
-        fig.savefig(savePath)
-
-
-    plt.close(fig)
-
-
-def weight_variable(shape):
-    initial = tf.truncated_normal(shape, stddev=0.1)
-    return tf.Variable(initial)
-
-def bias_variable(shape):
-    initial = tf.constant(0.1, shape=shape)
-    return tf.Variable(initial)
-
-def conv2d(x, W):
-    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
-
-def max_pool_2x2(x):
-    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
-
-def max_pool_4x4(x):
-    return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
-
-def placeholder_inputs():
-    # Specify placeholder_inputs
-    x = tf.placeholder(tf.float32, shape=[None, 25, 25, 3])
-    y = tf.placeholder(tf.float32, shape=[None, 4])
-    keep_prob = tf.placeholder(tf.float32)
-    return x, y, keep_prob
-
-def comp_graph_v_1(x, y, keep_prob):
-    # Specify computation graph
-    W_conv1 = weight_variable([5, 5, 3, 10])
-    b_conv1 = bias_variable([10])
-
-    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
-
-    h_pool1 = max_pool_2x2(h_conv1)
-    #print(tf.Tensor.get_shape(h_pool1))
-
-    W_fc1 = weight_variable([13*13*10, 4])
-    b_fc1 = bias_variable([4])
-
-    h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*10])
-    h_pool1_flat_drop = tf.nn.dropout(h_pool1_flat, keep_prob)
-
-    y_pred = tf.nn.softmax(tf.matmul(h_pool1_flat_drop,W_fc1) + b_fc1)
-
-    return y_pred
-
-def comp_graph_v_2(x, y, keep_prob):
-    # Specify computation graph
-    W_conv1 = weight_variable([5, 5, 3, 4])
-    b_conv1 = bias_variable([4])
-
-    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
-    h_pool1 = max_pool_2x2(h_conv1)
-    h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob)
-
-    W_conv2 = weight_variable([3, 3, 4, 8])
-    b_conv2 = bias_variable([8])
-
-    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
-    h_pool2 = max_pool_2x2(h_conv2)
-
-    W_fc1 = weight_variable([7*7*8, 4])
-    b_fc1 = bias_variable([4])
-
-    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*8])
-    h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob)
-
-    y_pred = tf.nn.softmax(tf.matmul(h_pool2_flat_drop,W_fc1) + b_fc1)
-
-    return y_pred
-
-def evaluation(y, y_pred):
-    # Evaluation function
-    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_pred,1))
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-    #tf.scalar_summary("accuracy", accuracy)
-
-    return accuracy
-
-
-def train():
-    # Start session
-    sess = tf.InteractiveSession()
-
-    x, y, keep_prob = placeholder_inputs()
-    y_pred = comp_graph_v_2(x, y, keep_prob)
-
-    # Specify loss
-    cross_entropy = -tf.reduce_sum(y*tf.log(y_pred))
-
-    # Specify training method
-    train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
-
-    # Evaluator
-    accuracy = evaluation(y, y_pred)
-
-    # Merge summaries and write them to ~/Code/Tensorflow_Exp/logDir
-    merged = tf.merge_all_summaries()
-    #writer = tf.train.SummaryWriter("/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier", graph_def=tf.GraphDef())
-
-    # Output dir
-    outdir = '/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_2/'
-    if not os.path.exists(outdir):
-        os.mkdir(outdir)
-
-    # Training Data
-    img_width = 75
-    img_height = 75
-    train_json_filename = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json'
-    image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images'
-    mean_image = shape_data_loader.mean_image(train_json_filename, image_dir, 1000, 100, img_height, img_width)
-    np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
-
-    # Val Data
-    val_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 9501, 499, img_height, img_width)
-    feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
-
-    # Session Saver
-    saver = tf.train.Saver()
-
-    # Start Training
-    sess.run(tf.initialize_all_variables())
-    batch_size = 10
-    max_epoch = 2
-    max_iter = 950
-    val_acc_array_iter = np.empty([max_iter*max_epoch])
-    val_acc_array_epoch = np.zeros([max_epoch])
-    train_acc_array_epoch = np.zeros([max_epoch])
-    for epoch in range(max_epoch):
-        for i in range(max_iter):
-            train_batch = shape_data_loader.obj_mini_batch_loader(train_json_filename, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
-            feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
-
-            _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
-
-            train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
-            val_acc_array_iter[i+epoch*max_iter] = accuracy.eval(feed_dict_val)
-            print('Step: {} Val Accuracy: {}'.format(i+1+epoch*max_iter, val_acc_array_iter[i+epoch*max_iter]))
-            plot_accuracy(np.arange(0,i+1+epoch*max_iter)+1, val_acc_array_iter[0:i+1+epoch*max_iter], xlim=[1, max_epoch*max_iter], ylim=[0, 1.], savePath=os.path.join(outdir,'valAcc_vs_iter.pdf'))
-
-
-        train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
-        val_acc_array_epoch[epoch] = val_acc_array_iter[i+epoch*max_iter]
-
-        plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
-
-        save_path = saver.save(sess, os.path.join(outdir,'obj_classifier_{}.ckpt'.format(epoch)))
-
-
-    sess.close()
-
-if __name__=='__main__':
-    train()
-
diff --git a/object_classifiers/train_obj_classifier.pyc b/object_classifiers/train_obj_classifier.pyc
deleted file mode 100644
index 550d73d4e5c289d02061f24457f8bc52c638aaee..0000000000000000000000000000000000000000
Binary files a/object_classifiers/train_obj_classifier.pyc and /dev/null differ
z6v3HhFD2u|cRhV1LXm@wM(?DCQc0128`Snu+6=d~mo&Z3_MJ2cx8DkryWhWE{z(rS zp|4#(_VPSBjKVD6)|nS2E;@!<w^|)N19sHd^$MFzq9I>Kp{z^RoHb+3*i~z;{8oJ0 zgOc_E^G8&k3Yam~!^Vye07KXS10&CZ4Y`f-9N+}h0ig*3|FLCR1I#>r24;RKeZ!am zS_-WFPSD%OPl2Z?w)XMF9ocnID1nkAc%s^Gwo?BncZe6+z#q`2vQu}xSpLSTpK~wc z^ih~?W>Nh}*I0Lm7l$Tu$jBiY99mH@6^HdO2^?B}00*baDz~S`fzCM;n!E%yuI#7g z6jML;;-R^7oN=DTJPHAh6#sAZ(2)$ICkpcmXv$Yn5I@VrkF{o<LJ;jqSkorra;}0f z2m(6jG3g|%o+XX&G;lK(&N=PAD+bGT5QN9|4}Q*{-`<iQIVt*3Q+1uS6W`Ne;3jF- z#NN<?cfBm~4&t!JwPpYEBd-<8d5w}tM_zokhI$kd5AJax#}>rMX#-^~S^hJOVk`pF zJk=<DOE{ZgC+2WAK#ATyy`Gn7=Q8`Kyo_?Lu!`^5`IUpn%lqo?V5Nb&6oX%Gj9}mk zAPffjvBl^OFqlsrLBWI>+up13EbSU!tFtHw3%AdM)$)QMO6uZ~_in%Q=8!$OWReJm zV3y;Zz<`0qKhZ4jotZ_P7r_1ozBz?ZX2-cC&#;)_RS?4_DD;QU_}Gg2<6|OnzeW7y zW2>?!5yphU2YK|9P<&U!wVdZ!e2&HEQ4GxCP+7r9(>rlnX&Tokkn<u(;d?g7pkFK3 zb9YX5zKZ&p+QfMYD;RT#8-AuaoG()E&)N*HoV;>#$P6GAy&cgk(C|PQXs`qX;3VNH z&C#e38uSgHI%b3+bl^EkrwWhSD=%>f$cGg;SUFST9HKQW7*T|XCaa0v#G^0*wPMc? zkJ2~@GdY2Waq8)pcO-xrbP+*vo;nhu9)w}bZDr}f*`bPYZb&XSN?L85-$H9XfkN5W z5RT?8pE?nN)KyIT2M{nSEh>=*hfU0>dKId`s>Gk(h(d_LMYjQBRIb0cufl{QYK6ue zWh^(>QrwZr5!aScV|-qM$O~1&YeIP8W`sKqK2tIb_v^`W4cuOENx{aUajI-Iy|}F4 zGcIdSn6;->cN+W0z1fYZnb|sCWFp=mWE17WAAQK`2e^a`D04%(iVo)*3+}UV4czuD zjN9V790n}e?<^cd&LB%$X<HjloE~bNY;mtmPdD3fO)fD+gG2xCyj-%l=hFXPmuUw> z^wBArF6p|2>U-{?Z>R&I$oEKG)^+zV_O#|@XSl(oU?NEe$P0tBLmn??e%frg^~`G> zxySDHZ=xk%M4{|eYu3JOS1a@QyJ{`jS8YKc(b`Ag>+m*bfItIQ!B_z#fN;oZAea<D z2O0rRR1G-$AT&S+swV_&lLl;nBbugoC+kg@(8MwTp}}jlL_dxWU61O`oIXkCWELQf z=khT6XNS1WmfYsb#y<MzRCh)MI%fnrue#?y1^onX#IqmKjT-b{C-pPfJL*$UMZfbU z3?3lg`7)~v;LcZAFl^Phj}k5i@OBu+ov*Q=OLxAGLL0aW<P9(>k~srALubIy<_wfH z&%n+QgB@Yuyhh%1Z$+03d-5pkNQ0U1I>vs6hhPWU6qRqHC=LPM088dL2sLydct_!8 z^$)~%Q2|nsf?z383ceYxu&4s8Ql)b=jd0Z%IYP{kKyttUufn9lo?P;F<pv<h0&_R! za^b@^UI*}o%FF6>U38)F7HvOtn;|aEl*{*W?GP^}42mP8H8pqNPqU0~ZTgtVXNo~C zROM^>>?}^_9^%#4mb?bERyp-h_KVh~icgC7eZGby%zcc&N7KM?O7W%u11WXCqVUXu z5gF`YEQ=Oo9tQ0={ikheaxjZH(9?`|=tLq2nK1Dh%+W|rhQUul7l(tfIGqOE1(rob zL<<h`v}#mUV}?Nv?DAhp%ekT|X(|XX#o!AU1JBf}GI-LD%&G>?f0Zu4Bnaqtz#WMO zxRvv&F<)|DAUgzM2+=*q(&FUBVltYLR6yh{=8vo1EW>6ONxEsZ220!^Sp{jU8%V0b zDmQv?Mn>%=(FdLf)n#d1VGES8kaX)Ii#Mx!Na4+?-lB-Ls=8QuK`j<ec1c>-*i^KD z4a6UASy3k&wqEBLFn;4lvvzM)oxEYOY0)&@uzG7^lwrMK9k7{Kiz_Gw`mKw8>jV8j zmij>x>UW_~2=@})FqnG*g9}*7x{r;!sCpZ!yP~?Qu*!y7G2~6AcQA2|5_OkkZP9jd z<z}#;8W-7ug;p>PVne7?W7e=UZ+%N|i1wE#+!MpmFSC*U{ExmX0kWe|7kdrPlvhS! zyGX;L251Hf*dYd}iuLd#>t3nGqFO4|I4xFpnR?#TmsPMjB%5SfR|=T|m|1eL7)cqg z1lSp5XCY_VP|EjTWP^=)Ajd3(GC&xxIwkr=G2<#NEIO#(CH3(4&;sz=B=AfolsC8C z`aeJX+ehylyn8EWBzkJ9@XeE!>x6kb*7^1x6Cv)Me!38sn4RtDJD8p`&ODVzk#&^Y zVD2)VnTNVaHF`;sZZ&cw9~ejfM;1}jtK&)k=L#ZqKrwj9qC<A2XDexSqK)U#`3Yk= z`po;TPD-i2-iqYv%fJ%-20V)Pk8jCq<S;~Hh|W4+$ACQ@wqD(QueZ60B;ID38D`59 z`Lvww9u5~n+~eGhkW$`i1&5NDbNF6z$S$YHh4<%>koiK6k$mdu+^hRuZ_9ts(zPp` z$&Hh6@^->ZHR8x|xECUMn<maH93UZ)N)^E=Z#RpCqe(KGbRh5gw*#;Fy_Sxe$N^;L zagXd4Yt9~5SwPVYvwG-yaqNOm6Zxnxm-Ja$<h_}IW-tj)q8zz!M#|=cAj;&HExRq7 z?c&|XHz4I2Y>seBS`s>$&K9DPB2&7AakN4c0=WmC_K!*qTwxDmGf@iJJIxGYk(Gn! 
zB5RpAGXjOpmhOmX&=Z1_Mdl@SvmPU(sBp*xpmwNr5$8F^VV2S)R~aRZtg!c!cR303 zw5pFKu%U^@tD++cn}m7}^f*q*T<jXiI(QS&U`*~r&AR(23Sd(~C2B{K36!lcY<N!G z!zd0DuNfATZ~}nR%<;&be4vuCnk4P}C`N`Y7VT+lvl+&=%mT?oW?KY{{ag>hD|jPM zqSebXuOpZisZKYWx!gS??O=lGq3I2U)2Jf3M46Ey2*R#X197ECk^`<@K@a{Flfkei zqg}uPD~RM&$!!6SXLgTreh3k8A5!_dD3o2b=dJn5M&&|fwo)A#L&{*TGG>|IOV;Je z73(?lEEFxv=%2S1@cV-GoP7mzFMxW8=iaj2vbQQ?aMx|}e=lzBA{G#U=bhZS&kA3y zfG1Pvadd$H$B+{I7*hUkASvykQQ`t+#|Koi1(Zo!(r&go&H=d-ZaPfzG((2w(ZI>@ z4ANP<Q3@!=tuS+bjG^Wq5Iia7cw8lQjl~Nr`22UaS%^ovLyB%m+=`g7(3Tj6w&*&; x2gd}s-)yEqI}TqX>T<4E#nWOIzx<1hoS$Eq9Kmlze#a*lC)Xz@C*P`C{{}PSUI73A -- GitLab