From 94a18a417422a880ce22d6495b2c2b4c63fce64c Mon Sep 17 00:00:00 2001
From: tgupta6 <tgupta6@illinois.edu>
Date: Fri, 25 Mar 2016 11:51:13 -0500
Subject: [PATCH] Refactor code to use Glorot-style initialization in the obj
 and atr classifiers and to save only the relevant variables
---
 .../answer_classifier/ans_data_io_helper.py   | 21 +++-
 .../answer_classifier/ans_data_io_helper.pyc  | Bin 5894 -> 6022 bytes
 .../answer_classifier/train_ans_classifier.py | 80 +++++++++++---
 .../eval_atr_classifier.py                    | 28 +++--
 .../eval_atr_classifier.pyc                   | Bin 3079 -> 3370 bytes
 .../train_atr_classifier.py                   | 83 +++++++++++----
 .../train_atr_classifier.pyc                  | Bin 4119 -> 4291 bytes
 .../#obj_data_io_helper.py#                   | 82 ---------------
 .../.#obj_data_io_helper.py                   |  1 -
 .../object_classifiers/eval_obj_classifier.py | 33 ++++--
 .../eval_obj_classifier.pyc                   | Bin 2950 -> 3188 bytes
 .../object_classifiers/obj_data_io_helper.pyc | Bin 4419 -> 4419 bytes
 .../train_obj_classifier.py                   | 53 ++++++++--
 .../train_obj_classifier.pyc                  | Bin 3318 -> 3639 bytes
 classifiers/tf_graph_creation_helper.py       | 99 +++++++++++++-----
 classifiers/tf_graph_creation_helper.pyc      | Bin 8335 -> 10342 bytes
 classifiers/train_classifiers.py              |  8 +-
 17 files changed, 307 insertions(+), 181 deletions(-)
 delete mode 100644 classifiers/object_classifiers/#obj_data_io_helper.py#
 delete mode 120000 classifiers/object_classifiers/.#obj_data_io_helper.py

diff --git a/classifiers/answer_classifier/ans_data_io_helper.py b/classifiers/answer_classifier/ans_data_io_helper.py
index 5c55cde..50317d1 100644
--- a/classifiers/answer_classifier/ans_data_io_helper.py
+++ b/classifiers/answer_classifier/ans_data_io_helper.py
@@ -117,7 +117,7 @@ def ans_mini_batch_loader(qa_dict, region_anno_dict, ans_dict, vocab,
     region_score = np.zeros(shape=[1,count])
     partition = np.zeros(shape=[count])
     question_encodings = np.zeros(shape=[count, len(vocab)])
-    
+
     for i in xrange(start_index, start_index + batch_size):
 
         image_id = qa_dict[i].image_id
@@ -132,14 +132,25 @@ def ans_mini_batch_loader(qa_dict, region_anno_dict, ans_dict, vocab,
         end1 = time.time()
 #        print('Ranking Region: ' + str(end1-start1))
-        
+
+        question_encoding_tmp = np.zeros(shape=[1, len(vocab)])
+
         for word in question[0:-1].split():
             if word not in vocab:
                 word = 'unk'
-            question_encodings[0, vocab[word]] += 1
-
+            question_encoding_tmp[0, vocab[word]] += 1
+
+        question_len = np.sum(question_encoding_tmp)
+        # print(question[0:-1].split())
+        # print(question_len)
+        # print(question_encoding_tmp)
+        # print(vocab)
+        assert (not question_len==0)
+
+        question_encoding_tmp /= question_len
+
         for j in xrange(num_proposals):
             counter = j + (i-start_index)*num_proposals
+            proposal = regions[j]
 
             start2 = time.time()
@@ -153,7 +164,7 @@ def ans_mini_batch_loader(qa_dict, region_anno_dict, ans_dict, vocab,
             region_score[0,counter] = proposal.score
             partition[counter] = i-start_index
-            question_encodings[counter,:] = question_encodings[0,:]
+            question_encodings[counter,:] = question_encoding_tmp
 
         score_start_id = (i-start_index)*num_proposals
         region_score[0, score_start_id:score_start_id+num_proposals] /= \
diff --git a/classifiers/answer_classifier/ans_data_io_helper.pyc b/classifiers/answer_classifier/ans_data_io_helper.pyc
index 9ed62f9ffe32ff02671645bc90a02302313167b1..a82756a940860691e4e9f2f7fbb6be98c2d944f8 100644
Binary files a/classifiers/answer_classifier/ans_data_io_helper.pyc and b/classifiers/answer_classifier/ans_data_io_helper.pyc differ
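# Illustrative sketch (added for exposition, not part of the patch): the hunk
# above builds the bag-of-words question encoding in a temporary buffer, maps
# out-of-vocabulary words to 'unk', and normalizes by the question length so
# the encoding sums to one; the assert catches empty questions, a NaN source.
# The toy vocabulary and question below are assumptions for the example.
import numpy as np

vocab = {'unk': 0, 'what': 1, 'color': 2, 'is': 3}
question = 'what color is zorp ?'
encoding = np.zeros(shape=[1, len(vocab)])
for word in question[0:-1].split():
    if word not in vocab:
        word = 'unk'                     # unknown words share one bucket
    encoding[0, vocab[word]] += 1
question_len = np.sum(encoding)
assert question_len != 0                 # an empty question would divide by zero
encoding /= question_len                 # -> [[0.25, 0.25, 0.25, 0.25]]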
diff --git a/classifiers/answer_classifier/train_ans_classifier.py b/classifiers/answer_classifier/train_ans_classifier.py
index 1bf795b..a4dfdde 100644
--- a/classifiers/answer_classifier/train_ans_classifier.py
+++ b/classifiers/answer_classifier/train_ans_classifier.py
@@ -5,6 +5,7 @@ import matplotlib.pyplot as plt
 import matplotlib.image as mpimg
 import numpy as np
 import math
+import pdb
 import tensorflow as tf
 import object_classifiers.obj_data_io_helper as obj_data_loader
 import attribute_classifiers.atr_data_io_helper as atr_data_loader
@@ -81,11 +82,11 @@ def train(train_params):
     # Save region crops
     if crop_n_save_regions == True:
         qa_anno_dict_test = ans_io_helper.parse_qa_anno(test_anno_filename)
-        ans_io_helper.save_regions(image_dir, image_regions_dir, 
-                                   qa_anno_dict, region_anno_dict, 
+        ans_io_helper.save_regions(image_dir, image_regions_dir,
+                                   qa_anno_dict, region_anno_dict,
                                    1, 111351, 75, 75)
-        ans_io_helper.save_regions(image_dir, image_regions_dir, 
-                                   qa_anno_dict_test, region_anno_dict, 
+        ans_io_helper.save_regions(image_dir, image_regions_dir,
+                                   qa_anno_dict_test, region_anno_dict,
                                    111352, 160725-111352+1, 75, 75)
 
 
@@ -99,7 +100,7 @@ def train(train_params):
     atr_feat = tf.get_collection('atr_feat', scope='atr/conv2')
 
     # model restoration
-    obj_atr_saver = tf.train.Saver()
+#    obj_atr_saver = tf.train.Saver()
     model_to_restore = '/home/tanmay/Code/GenVQA/GenVQA/classifiers/' + \
                        'saved_models/obj_atr_classifier-1'
 #    obj_atr_saver.restore(sess, model_to_restore)
@@ -120,18 +121,26 @@ def train(train_params):
     train_step = tf.train.AdamOptimizer(train_params['adam_lr']) \
                          .minimize(cross_entropy, var_list=vars_to_opt)
 
-    print(train_step.name)
-    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES,scope='obj') + \
-                      tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr') + \
-                      tf.get_collection(tf.GraphKeys.VARIABLES, scope='ans/word_embed')
+    word_embed = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ans/word_embed')
+    vars_to_restore = \
+        tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \
+        tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr') + \
+        [word_embed[0]]
+
     all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
     vars_to_init = [var for var in all_vars if var not in vars_to_restore]
 
+    vars_to_save = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \
+                   tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr') + \
+                   tf.get_collection(tf.GraphKeys.VARIABLES, scope='ans')
+
+    print('vars_to_save: ')
+    print([var.name for var in vars_to_save])
+
     # Session saver
     saver = tf.train.Saver(vars_to_restore)
-
+    saver2 = tf.train.Saver(vars_to_save)
+
     if restore_intermediate_model==True:
         intermediate_model = '/home/tanmay/Code/GenVQA/Exp_Results/' + \
                              'Ans_Classifier/ans_classifier_question_only-9'
@@ -155,6 +164,10 @@ def train(train_params):
     placeholders = [image_regions, questions, keep_prob, y, region_score]
 
+    # Variables to observe
+    W_fc2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ans/fc2/W')
+    q_feat = tf.get_collection('q_feat', scope='ans/q_embed')
+
     # Start Training
 #    batch_size = 1
     max_epoch = 10
@@ -180,25 +193,58 @@ def train(train_params):
             y: train_ans_labels,
             region_score: train_region_score,
         }
+
+        try:
+            assert (not np.any(np.isnan(q_feat[0].eval(feed_dict_train))))
+        except AssertionError:
+            print('NaN in q_feat')
+            print(1+i*batch_size)
+            print(train_questions)
+            print(logits.eval(feed_dict_train))
+            print(cross_entropy.eval(feed_dict_train))
+            exit(1)
+
         start1 = time.time()
-        _, current_train_batch_acc, y_avg_eval, y_pred_eval, logits_eval = \
-            sess.run([train_step, accuracy, y_avg, y_pred, logits],
+        _, current_train_batch_acc, y_avg_eval, y_pred_eval, logits_eval, W_fc2_eval = \
+            sess.run([train_step, accuracy, y_avg, y_pred, logits, W_fc2[0]],
                      feed_dict=feed_dict_train)
         end1 = time.time()
 #        print('Training Pass: ' + str(end1-start1))
         train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + \
                                        current_train_batch_acc
+#        pdb.set_trace()
+
+        try:
+            assert (not np.any(np.isnan(W_fc2_eval)))
+        except AssertionError:
+            print('NaN in W_fc2')
+            print(1+i*batch_size)
+            print(W_fc2_eval)
+            exit(1)
+
+        try:
+            assert (not np.any(np.isnan(logits_eval)))
+        except AssertionError:
+            print('NaN in logits')
+            print(1+i*batch_size)
+            print(y_avg_eval)
+            exit(1)
+
         try:
             assert (not np.any(np.isnan(y_avg_eval)))
         except AssertionError:
-            print('Run NaNs coming')
+            print('NaN in y_avg')
             print(1+i*batch_size)
+            print(logits_eval)
             print(y_avg_eval)
             exit(1)
 
         if (i+1)%500==0:
             print(logits_eval[0:22,:])
             print(train_region_score[0,0:22])
+            print(train_ans_labels[0,:])
 #            print(train_ans_labels[0,:])
             print(y_avg_eval[0,:])
 #            print(y_pred_eval)
@@ -233,15 +279,15 @@ def train(train_params):
                                 savePath=os.path.join(outdir,
                                                       'acc_vs_epoch_q_o_atr.pdf'))
 
-        save_path = saver.save(sess,
-                               os.path.join(outdir,'ans_classifier_question_obj_atr_only'),
-                               global_step=epoch)
+        save_path = saver2.save(sess,
+                                os.path.join(outdir,'ans_classifier_question_obj_atr'),
+                                global_step=epoch)
 
     sess.close()
     tf.reset_default_graph()
 
 if __name__=='__main__':
     train_params = {
-        'adam_lr' : 0.00001,
+        'adam_lr' : 0.0001,
     }
     train(train_params)
diff --git a/classifiers/attribute_classifiers/eval_atr_classifier.py b/classifiers/attribute_classifiers/eval_atr_classifier.py
index 6431390..da1b343 100644
--- a/classifiers/attribute_classifiers/eval_atr_classifier.py
+++ b/classifiers/attribute_classifiers/eval_atr_classifier.py
@@ -15,13 +15,20 @@ def eval(eval_params):
     x, y, keep_prob = graph_creator.placeholder_inputs()
     _ = graph_creator.obj_comp_graph(x, 1.0)
-    obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
-
-    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat[0])
+    g = tf.get_default_graph()
+    obj_feat = g.get_operation_by_name('obj/conv2/obj_feat')
+    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat.outputs[0])
     accuracy = graph_creator.evaluation(y, y_pred)
 
-    saver = tf.train.Saver()
-    saver.restore(sess, eval_params['model_name'] + '-' + str(eval_params['global_step']))
+    # Object model restorer
+    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \
+                      tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
+    print('Variables to restore:')
+    print([var.name for var in vars_to_restore])
+
+    saver = tf.train.Saver(vars_to_restore)
+    saver.restore(sess, eval_params['model_name'] + '-' + \
+                  str(eval_params['global_step']))
 
     mean_image = np.load(os.path.join(eval_params['out_dir'],
                                       'mean_image.npy'))
     test_json_filename = eval_params['test_json']
@@ -36,12 +43,17 @@ def eval(eval_params):
     html_dir = eval_params['html_dir']
     if not os.path.exists(html_dir):
         os.mkdir(html_dir)
-    html_writer = atr_data_loader.html_atr_table_writer(os.path.join(html_dir, 'index.html'))
+
+    html_writer = atr_data_loader \
+        .html_atr_table_writer(os.path.join(html_dir, 'index.html'))
+
     col_dict = {
         0: 'Ground Truth',
         1: 'Prediction',
         2: 'Image'}
+
     html_writer.add_element(col_dict)
+
     color_dict = {
         0: 'red', # blanks are treated as red
         1: 'green',
@@ -51,7 +63,9 @@ def eval(eval_params):
     batch_size = 100
     correct = 0
     for i in range(50):
-        test_batch = atr_data_loader.atr_mini_batch_loader(test_json_data, image_dir, mean_image, 10000+i*batch_size, batch_size, 75, 75)
+        test_batch = atr_data_loader \
+            .atr_mini_batch_loader(test_json_data, image_dir, mean_image,
+                                   10000+i*batch_size, batch_size, 75, 75)
         feed_dict_test={x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
         result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
         correct = correct + result[0]*batch_size
diff --git a/classifiers/attribute_classifiers/eval_atr_classifier.pyc b/classifiers/attribute_classifiers/eval_atr_classifier.pyc
index 960456aa4a4ea6b14e9af628b1864cfd74c89fa3..3e013ff8e315f15dbf5bdb4a5e6996969f81c984 100644
Binary files a/classifiers/attribute_classifiers/eval_atr_classifier.pyc and b/classifiers/attribute_classifiers/eval_atr_classifier.pyc differ
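# Illustrative sketch (assumed TensorFlow 0.x API, mirroring the change above):
# instead of stashing the feature tensor in a named collection, the refactor
# names the reshape op at graph-construction time and looks it up afterwards.
# (Assumes obj_comp_graph(...) has already been built on the default graph.)
import tensorflow as tf

g = tf.get_default_graph()
obj_feat_op = g.get_operation_by_name('obj/conv2/obj_feat')  # the reshape op
obj_feat = obj_feat_op.outputs[0]        # tensor 'obj/conv2/obj_feat:0'
# equivalent lookup: g.get_tensor_by_name('obj/conv2/obj_feat:0')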
diff --git a/classifiers/attribute_classifiers/train_atr_classifier.py b/classifiers/attribute_classifiers/train_atr_classifier.py
index 00fc144..e81e170 100644
--- a/classifiers/attribute_classifiers/train_atr_classifier.py
+++ b/classifiers/attribute_classifiers/train_atr_classifier.py
@@ -15,31 +15,47 @@ def train(train_params):
     x, y, keep_prob = graph_creator.placeholder_inputs()
     _ = graph_creator.obj_comp_graph(x, 1.0)
-    obj_feat = tf.get_collection('obj_feat', scope='obj/conv2')
+    g = tf.get_default_graph()
+    obj_feat = g.get_operation_by_name('obj/conv2/obj_feat')
 
-    # Session Saver
-    obj_saver = tf.train.Saver()
+    # Object model restorer
+    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj')
+    print('Variables to restore:')
+    print([var.name for var in vars_to_restore])
 
-    # Restore obj network parameters
-    obj_saver.restore(sess, train_params['obj_model_name'] + '-' + str(train_params['obj_global_step']))
+    obj_saver = tf.train.Saver(vars_to_restore)
+    obj_saver.restore(sess, train_params['obj_model_name'] + '-' + \
+                      str(train_params['obj_global_step']))
 
-    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat[0])
+    y_pred = graph_creator.atr_comp_graph(x, keep_prob, obj_feat.outputs[0])
     cross_entropy = graph_creator.loss(y, y_pred)
+    accuracy = graph_creator.evaluation(y, y_pred)
+
+    # Collect variables to save or optimize
     vars_to_opt = tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
-    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj')
-    train_step = tf.train.AdamOptimizer(train_params['adam_lr']).minimize(cross_entropy, var_list=vars_to_opt)
+    vars_to_save = vars_to_opt + vars_to_restore
+
+    print('Variables to optimize:')
+    print([var.name for var in vars_to_opt])
+    print('Variables to save:')
+    print([var.name for var in vars_to_save])
+
+    # Object and Attribute model saver
+    obj_atr_saver = tf.train.Saver(vars_to_save)
+
+    # Add optimization op
+    train_step = tf.train.AdamOptimizer(train_params['adam_lr']) \
+                         .minimize(cross_entropy, var_list=vars_to_opt)
+
+    # Collect variables to initialize
     all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
     vars_to_init = [var for var in all_vars if var not in vars_to_restore]
-    print('Variables that are being optimized: ' + ' '.join([var.name for var in vars_to_opt]))
-    print('Variables that will be initialized: ' + ' '.join([var.name for var in vars_to_init]))
-    accuracy = graph_creator.evaluation(y, y_pred)
+    print('Variables to initialize:')
+    print([var.name for var in vars_to_init])
 
-    # Session saver for atr variables
-    atr_saver = tf.train.Saver(vars_to_opt)
-    obj_atr_saver = tf.train.Saver(all_vars)
-
     outdir = train_params['out_dir']
     if not os.path.exists(outdir):
         os.mkdir(outdir)
@@ -59,7 +75,9 @@ def train(train_params):
     image_dir = train_params['image_dir']
     if train_params['mean_image']=='':
         print('Computing mean image')
-        mean_image = atr_data_loader.mean_image(train_json_data, image_dir, 1000, 100, img_height, img_width)
+        mean_image = atr_data_loader.mean_image(train_json_data, image_dir,
+                                                1000, 100,
+                                                img_height, img_width)
     else:
         print('Loading mean image')
         mean_image = np.load(train_params['mean_image'])
@@ -67,7 +85,11 @@ def train(train_params):
 
     # Val Data
     print('Loading validation data')
-    val_batch = atr_data_loader.atr_mini_batch_loader(train_json_data, image_dir, mean_image, 9501, 499, img_height, img_width)
+    val_batch = atr_data_loader.atr_mini_batch_loader(train_json_data,
+                                                      image_dir, mean_image,
+                                                      9501, 499,
+                                                      img_height, img_width)
+
     feed_dict_val={x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
 
@@ -85,16 +107,31 @@ def train(train_params):
         for i in range(max_iter):
             if i%100==0:
                 print('Iter: ' + str(i))
-            train_batch = atr_data_loader.atr_mini_batch_loader(train_json_data, image_dir, mean_image, 1+i*batch_size, batch_size, img_height, img_width)
+                print('Val Acc: ' + str(accuracy.eval(feed_dict_val)))
+
+            train_batch = atr_data_loader \
+                .atr_mini_batch_loader(train_json_data, image_dir, mean_image,
+                                       1+i*batch_size, batch_size,
+                                       img_height, img_width)
             feed_dict_train={x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
-            _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
-            train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
+
+            _, current_train_batch_acc = sess.run([train_step, accuracy],
+                                                  feed_dict=feed_dict_train)
+            train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] \
+                                           + current_train_batch_acc
 
         train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
         val_acc_array_epoch[epoch] = accuracy.eval(feed_dict_val)
-        plotter.plot_accuracies(xdata=np.arange(0,epoch+1)+1, ydata_train=train_acc_array_epoch[0:epoch+1], ydata_val=val_acc_array_epoch[0:epoch+1], xlim=[1, max_epoch], ylim=[0, 1.], savePath=os.path.join(outdir,'acc_vs_epoch.pdf'))
-        _ = atr_saver.save(sess, os.path.join(outdir,'atr_classifier'), global_step=epoch)
-        _ = obj_atr_saver.save(sess, os.path.join(outdir,'obj_atr_classifier'), global_step=epoch)
+
+        plotter.plot_accuracies(xdata=np.arange(0,epoch+1)+1,
+                                ydata_train=train_acc_array_epoch[0:epoch+1],
+                                ydata_val=val_acc_array_epoch[0:epoch+1],
+                                xlim=[1, max_epoch], ylim=[0, 1.],
+                                savePath=os.path.join(outdir,
+                                                      'acc_vs_epoch.pdf'))
+
+        _ = obj_atr_saver.save(sess, os.path.join(outdir,'obj_atr_classifier'),
+                               global_step=epoch)
 
     sess.close()
diff --git a/classifiers/attribute_classifiers/train_atr_classifier.pyc b/classifiers/attribute_classifiers/train_atr_classifier.pyc
index a80445734eae7f1bbe09a014ed07d0acab095d64..6635e32a225d8d0bcdc94f7d5551369481cc16e4 100644
Binary files a/classifiers/attribute_classifiers/train_atr_classifier.pyc and b/classifiers/attribute_classifiers/train_atr_classifier.pyc differ
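# Illustrative sketch (assumed TensorFlow 0.x API, matching the refactor's
# intent): passing an explicit var_list to tf.train.Saver is what makes the
# checkpoints contain only the object/attribute variables rather than every
# variable in the graph. The checkpoint paths below are placeholders.
import tensorflow as tf

vars_to_save = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj') + \
               tf.get_collection(tf.GraphKeys.VARIABLES, scope='atr')
obj_atr_saver = tf.train.Saver(vars_to_save)   # saves/restores only these
# obj_atr_saver.save(sess, 'obj_atr_classifier', global_step=epoch)
# obj_atr_saver.restore(sess, 'obj_atr_classifier-9')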
zhEb9c6jeN!MNt|E6bA-`GT<bj4d@Bz2xN>@A$T#cA_*OWj)90?Jg^@Q#uS`M$Ugaw z2fh=hbfiiIs^mDVslanG@a&|a<B%!nAu09bdWz1Q#37(3ab<579<8P~i5pS$s)FQw z1O20_@Y0YJ$`qVTKu-`oJ)%3)%sP5SaIROcNuoaHrBCy2k9eGT_C(^DaRyXU4j10^ z5b1NRDL{s30aZDJk~af211AS34?V*=_gBG4VKfyTB2L501tXDYnx%CGs3LR$k`@{G z<V|=*s988A=wjev7HTfo+7j+wgq}tJ%HAftIp{ga97?(rRA<ZJc|MqrG}}j82EDtx z5_tV>c32>X^MUIkHO4G%80dXz$P%tzQ}b}jAY*zSk}MI8g3MtMfmcRn7P8E&1vq`; zkS(&n6)1zT$~gNBrz;mY7hUF2$jZoib!44zS%s{P9Yo<Rz@~;<2-=W(a*M?}r4quH zq(+{=mREsXBtz8KeWw;|L>)TK6{1(jC3s|W8D0&t0k0lJwFph58ghpL%UcZ4C5A@G z(kWyacOM9~%p5X&W$$z0HE{Q9FjNnq{@#6fE6^)kqyn8BuHvJRTmc`m73NUe(ROo5 zPbN*V(6TCWMH)ucrQ}zG<X8Vqp2TtTtHb2i_&ihEnvPDUk3+fu2wEdo;$1qUVR-A% z6kvmDwvOUZ+&+2`+8v{H<VUE^z-d5NI8IHqXkc5dz#*sWK{3`i5JD*izA5-4H`w|Z z?xR?jo4j_6FmQ&MTv%FimNm3-ET8*tXPoDr7$73_G<OZ9H99uK#(9d>@@e)zFoYL| z&Yq(6slhHh$vgUta-|xr^HC9eC2j-~wf-Ax7wPJJm2MUO{q};2(WiUhG_PBxQem37 zt?n($jSu1Tt#0S|f|?qUZg!>BZg$L@mJ$S=RcZgnNHwNHL%n}4I=8z_@>^!lHV@jC zTT}0us%G}A+JR+v4r|?8%D!oTV9DodfrXm;KlD3xyG{I>-LaKzwukhGGCC>zeoRep zF#cb$N=aphZkwj+HCt`db?xi6)mz*2=i~cXrX5<U+3L32R!iC4j{j49J2pXZyX(6C zH}M7ka-v#e!0Fl@#X^u;f&}Z>Y~MD6Rr`tkT!tXJ6Nzvu;JN0p<^PcQN^JNaC7%!P zkOuyr$umny5{}KD+f>~qC9BdW<~_?*-Ja!}sfCxgV#HIa6MD`!*blSSy4^Ed@6!<i zI5~FLQ<F5~t$K|3AE){e_OO$AI5YX0xVT6%v)m79fJhm6krnw+I<#VxNSddo5z32( zSfeK!%8IPv-=C^nsvBW3BdVdWsEehEuqYT2!B2q}Wl1w5GW1;+OQbItjJRS|Z2IMN zCGre;+BxrEPTTosNmN`tWp&(c?|Qp?XQ%9co1Pb!{731!sQ7=UOJdP4WWET!lM?>` De3F-> diff --git a/classifiers/object_classifiers/#obj_data_io_helper.py# b/classifiers/object_classifiers/#obj_data_io_helper.py# deleted file mode 100644 index 16280b4..0000000 --- a/classifiers/object_classifiers/#obj_data_io_helper.py# +++ /dev/null @@ -1,82 +0,0 @@ -import json -import sys -import os -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np -import tensorflow as tf -from scipy import misc - -def obj_mini_batch_loader(json_data, image_dir, mean_image, start_index, batch_size, img_height = 100, img_width = 100, channels = 3): - - obj_images = np.empty(shape=[9 * batch_size, img_height / 3, img_width / 3, channels]) - obj_labels = np.zeros(shape=[9 * batch_size, 4]) - - for i in range(start_index, start_index + batch_size): - image_name = os.path.join(image_dir, str(i) + '.jpg') - image = misc.imresize(mpimg.imread(image_name), (img_height, img_width), interp='nearest') - crop_shape = np.array([image.shape[0], image.shape[1]]) / 3 - grid_config = json_data[i] - - counter = 0 - for grid_row in range(0, 3): - for grid_col in range(0, 3): - start_row = grid_row * crop_shape[0] - start_col = grid_col * crop_shape[1] - cropped_image = image[start_row:start_row + crop_shape[0], start_col:start_col + crop_shape[1], :] - - if np.ndim(mean_image) == 0: - obj_images[9 * (i - start_index) + counter, :, :, :] = cropped_image / 254.0 - else: - obj_images[9 * (i - start_index) + counter, :, :, :] = (cropped_image / 254.0) - mean_image - - obj_labels[9 * (i - start_index) + counter, grid_config[6 * grid_row + 2 * grid_col]] = 1 - counter = counter + 1 - - return (obj_images, obj_labels) - - -def mean_image_batch(json_data, image_dir, start_index, batch_size, img_height = 100, img_width = 100, channels = 3): - batch = obj_mini_batch_loader(json_data, image_dir, np.empty([]), start_index, batch_size, img_height, img_width, channels) - mean_image = np.mean(batch[0], 0) - return mean_image - - -def mean_image(json_data, image_dir, num_images, batch_size, img_height = 100, img_width = 100, channels = 3): - max_iter = np.floor(num_images / batch_size) - mean_image = 
-    mean_image = np.zeros([img_height / 3, img_width / 3, channels])
-    for i in range(max_iter.astype(np.int16)):
-        mean_image = mean_image + mean_image_batch(json_data, image_dir, 1 + i * batch_size, batch_size, img_height, img_width, channels)
-
-    mean_image = mean_image / max_iter
-    return mean_image
-
-
-class html_obj_table_writer:
-
-    def __init__(self, filename):
-        self.filename = filename
-        self.html_file = open(self.filename, 'w')
-        self.html_file.write('<!DOCTYPE html>\n<html>\n<body>\n<table border="1" style="width:100%"> \n')
-
-    def add_element(self, col_dict):
-        self.html_file.write('    <tr>\n')
-        for key in range(len(col_dict)):
-            self.html_file.write('    <td>{}</td>\n'.format(col_dict[key]))
-
-        self.html_file.write('    </tr>\n')
-
-    def image_tag(self, image_path, height, width):
-        return '<img src="{}" alt="IMAGE NOT FOUND!" height={} width={}>'.format(image_path, height, width)
-
-    def close_file(self):
-        self.html_file.write('</table>\n</body>\n</html>')
-        self.html_file.close()
-
-
-if __name__ == '__main__':
-    html_writer = html_obj_table_writer('/home/tanmay/Code/GenVQA/Exp_Results/Shape_Classifier_v_1/trial.html')
-    col_dict = {0: 'sam',
-                1: html_writer.image_tag('something.png', 25, 25)}
-    html_writer.add_element(col_dict)
-    html_writer.close_file()
diff --git a/classifiers/object_classifiers/.#obj_data_io_helper.py b/classifiers/object_classifiers/.#obj_data_io_helper.py
deleted file mode 120000
index 8c52df0..0000000
--- a/classifiers/object_classifiers/.#obj_data_io_helper.py
+++ /dev/null
@@ -1 +0,0 @@
-tanmay@crunchy.15752:1450461082
\ No newline at end of file
diff --git a/classifiers/object_classifiers/eval_obj_classifier.py b/classifiers/object_classifiers/eval_obj_classifier.py
index 6d209df..89beda0 100644
--- a/classifiers/object_classifiers/eval_obj_classifier.py
+++ b/classifiers/object_classifiers/eval_obj_classifier.py
@@ -16,9 +16,17 @@ def eval(eval_params):
     x, y, keep_prob = graph_creator.placeholder_inputs()
     y_pred = graph_creator.obj_comp_graph(x, keep_prob)
     accuracy = graph_creator.evaluation(y, y_pred)
-
-    saver = tf.train.Saver()
-    saver.restore(sess, eval_params['model_name'] + '-' + str(eval_params['global_step']))
+
+    # Collect variables
+    vars_to_restore = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj')
+    print('Variables to restore:')
+    print([var.name for var in vars_to_restore])
+
+    saver = tf.train.Saver(vars_to_restore)
+    saver.restore(sess, eval_params['model_name'] + '-' + \
+                  str(eval_params['global_step']))
+
+    print 'Loading mean image'
     mean_image = np.load(os.path.join(eval_params['out_dir'], 'mean_image.npy'))
     test_json_filename = eval_params['test_json']
     with open(test_json_filename, 'r') as json_file:
@@ -32,12 +40,17 @@ def eval(eval_params):
     html_dir = eval_params['html_dir']
     if not os.path.exists(html_dir):
         os.mkdir(html_dir)
-    html_writer = shape_data_loader.html_obj_table_writer(os.path.join(html_dir, 'index.html'))
+
+    html_writer = shape_data_loader \
+        .html_obj_table_writer(os.path.join(html_dir, 'index.html'))
+
     col_dict = {
         0: 'Ground Truth',
         1: 'Prediction',
         2: 'Image'}
+
     html_writer.add_element(col_dict)
+
     shape_dict = {
         0: 'blank',
         1: 'rectangle',
@@ -47,7 +60,8 @@ def eval(eval_params):
     batch_size = 100
     correct = 0
     for i in range(50):
-        test_batch = shape_data_loader.obj_mini_batch_loader(test_json_data, image_dir, mean_image, 10000 + i * batch_size, batch_size, 75, 75)
+        test_batch = shape_data_loader.obj_mini_batch_loader(test_json_data,
+            image_dir, mean_image, 10000 + i * batch_size, batch_size, 75, 75)
         feed_dict_test = {x: test_batch[0], y: test_batch[1], keep_prob: 1.0}
         result = sess.run([accuracy, y_pred], feed_dict=feed_dict_test)
         correct = correct + result[0] * batch_size
@@ -57,12 +71,15 @@ def eval(eval_params):
             gt_id = np.argmax(test_batch[1][row, :])
             pred_id = np.argmax(result[1][row, :])
             if not gt_id == pred_id:
-                img_filename = os.path.join(html_dir, '{}_{}.png'.format(i, row))
-                misc.imsave(img_filename, test_batch[0][row, :, :, :] + mean_image)
+                img_filename = os.path.join(html_dir,
+                                            '{}_{}.png'.format(i, row))
+                misc.imsave(img_filename,
+                            test_batch[0][row, :, :, :] + mean_image)
                 col_dict = {
                     0: shape_dict[gt_id],
                     1: shape_dict[pred_id],
-                    2: html_writer.image_tag('{}_{}.png'.format(i, row), 25, 25)}
+                    2: html_writer.image_tag('{}_{}.png' \
+                                             .format(i, row), 25, 25)}
                 html_writer.add_element(col_dict)
 
     html_writer.close_file()
diff --git a/classifiers/object_classifiers/eval_obj_classifier.pyc b/classifiers/object_classifiers/eval_obj_classifier.pyc
index 8b6be912f6e9110be8d3644516b9293701cce808..6e3241b3601e7c0b97d1710b323fd4aef37d0f16 100644
Binary files a/classifiers/object_classifiers/eval_obj_classifier.pyc and b/classifiers/object_classifiers/eval_obj_classifier.pyc differ
diff --git a/classifiers/object_classifiers/obj_data_io_helper.pyc b/classifiers/object_classifiers/obj_data_io_helper.pyc
index ae5847b62758c1ea8aec0706d785fa461c734d1f..0351d49c5a5a5c01098023f5cf8035351f187977 100644
Binary files a/classifiers/object_classifiers/obj_data_io_helper.pyc and b/classifiers/object_classifiers/obj_data_io_helper.pyc differ
diff --git a/classifiers/object_classifiers/train_obj_classifier.py b/classifiers/object_classifiers/train_obj_classifier.py
index b371127..ac3f85d 100644
--- a/classifiers/object_classifiers/train_obj_classifier.py
+++ b/classifiers/object_classifiers/train_obj_classifier.py
@@ -15,7 +15,6 @@ def train(train_params):
     x, y, keep_prob = graph_creator.placeholder_inputs()
     y_pred = graph_creator.obj_comp_graph(x, keep_prob)
     cross_entropy = graph_creator.loss(y, y_pred)
-    train_step = tf.train.AdamOptimizer(train_params['adam_lr']).minimize(cross_entropy)
     accuracy = graph_creator.evaluation(y, y_pred)
 
     outdir = train_params['out_dir']
@@ -37,19 +36,36 @@ def train(train_params):
     image_dir = train_params['image_dir']
     if train_params['mean_image']=='':
         print('Computing mean image')
-        mean_image = shape_data_loader.mean_image(train_json_data, image_dir, 1000, 100, img_height, img_width)
+        mean_image = shape_data_loader.mean_image(train_json_data,
+                                                  image_dir, 1000, 100,
+                                                  img_height, img_width)
     else:
         print('Loading mean image')
         mean_image = np.load(train_params['mean_image'])
     np.save(os.path.join(outdir, 'mean_image.npy'), mean_image)
 
     # Val Data
-    val_batch = shape_data_loader.obj_mini_batch_loader(train_json_data, image_dir, mean_image, 9501, 499, img_height, img_width)
+    val_batch = shape_data_loader.obj_mini_batch_loader(train_json_data,
+                                                        image_dir, mean_image,
+                                                        9501, 499,
+                                                        img_height, img_width)
     feed_dict_val = {x: val_batch[0], y: val_batch[1], keep_prob: 1.0}
 
+    # Collect variables
+    all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
+    vars_to_save = tf.get_collection(tf.GraphKeys.VARIABLES, scope='obj')
+
+    print('All variables:')
+    print([var.name for var in all_vars])
+    print('Variables to save:')
+    print([var.name for var in vars_to_save])
+
     # Session Saver
-    saver = tf.train.Saver()
+    saver = tf.train.Saver(vars_to_save)
+
+    # Add optimization op
+    train_step = tf.train.AdamOptimizer(train_params['adam_lr']) \
+                         .minimize(cross_entropy)
 
     # Start Training
     sess.run(tf.initialize_all_variables())
     batch_size = 10
@@ -64,15 +80,32 @@ def train(train_params):
             print('Iter: ' + str(i))
             print('Val Acc: ' + str(accuracy.eval(feed_dict_val)))
 
-        train_batch = shape_data_loader.obj_mini_batch_loader(train_json_data, image_dir, mean_image, 1 + i * batch_size, batch_size, img_height, img_width)
-        feed_dict_train = {x: train_batch[0], y: train_batch[1], keep_prob: 0.5}
-        _, current_train_batch_acc = sess.run([train_step, accuracy], feed_dict=feed_dict_train)
-        train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + current_train_batch_acc
+        train_batch = shape_data_loader \
+            .obj_mini_batch_loader(train_json_data, image_dir, mean_image,
+                                   1 + i * batch_size, batch_size,
+                                   img_height, img_width)
+        feed_dict_train = {
+            x: train_batch[0],
+            y: train_batch[1],
+            keep_prob: 0.5
+        }
+
+        _, current_train_batch_acc = sess.run([train_step, accuracy],
+                                              feed_dict=feed_dict_train)
+        train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] \
+                                       + current_train_batch_acc
 
     train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
     val_acc_array_epoch[epoch] = accuracy.eval(feed_dict_val)
-    plotter.plot_accuracies(xdata=np.arange(0, epoch + 1) + 1, ydata_train=train_acc_array_epoch[0:epoch + 1], ydata_val=val_acc_array_epoch[0:epoch + 1], xlim=[1, max_epoch], ylim=[0, 1.0], savePath=os.path.join(outdir, 'acc_vs_epoch.pdf'))
-    save_path = saver.save(sess, os.path.join(outdir, 'obj_classifier'), global_step=epoch)
+
+    plotter.plot_accuracies(xdata=np.arange(0, epoch + 1) + 1,
+                            ydata_train=train_acc_array_epoch[0:epoch + 1],
+                            ydata_val=val_acc_array_epoch[0:epoch + 1],
+                            xlim=[1, max_epoch], ylim=[0, 1.0],
+                            savePath=os.path.join(outdir,
+                                                  'acc_vs_epoch.pdf'))
+    save_path = saver.save(sess, os.path.join(outdir, 'obj_classifier'),
+                           global_step=epoch)
 
     sess.close()
     tf.reset_default_graph()
diff --git a/classifiers/object_classifiers/train_obj_classifier.pyc b/classifiers/object_classifiers/train_obj_classifier.pyc
index b03422c5963b73ccb531ea7d02982f1893120892..4318b88313f9f71ae7931ceba42136ffeb86287b 100644
Binary files a/classifiers/object_classifiers/train_obj_classifier.pyc and b/classifiers/object_classifiers/train_obj_classifier.pyc differ
diff --git a/classifiers/tf_graph_creation_helper.py b/classifiers/tf_graph_creation_helper.py
index be5d854..aae6a23 100644
--- a/classifiers/tf_graph_creation_helper.py
+++ b/classifiers/tf_graph_creation_helper.py
@@ -3,17 +3,28 @@ import math
 import tensorflow as tf
 import answer_classifier.ans_data_io_helper as ans_io_helper
 
-def weight_variable(shape, var_name = 'W', std=0.1):
-    initial = tf.truncated_normal(shape, stddev=std)
-    return tf.Variable(initial, name=var_name)
+graph_config = {
+    'num_objects': 4,
+    'num_attributes': 4,
+    'obj_feat_dim': 392,
+    'atr_feat_dim': 392,
+}
+def weight_variable(tensor_shape, fan_in=None, var_name='W'):
+    if fan_in==None:
+        fan_in = reduce(lambda x, y: x*y, tensor_shape[0:-1])
+    stddev = math.sqrt(2.0/fan_in)
+    print(stddev)
+    initial = tf.truncated_normal(shape=tensor_shape, mean=0.0, stddev=stddev)
+    return tf.Variable(initial_value=initial, name=var_name)
 
-def bias_variable(shape, var_name = 'b'):
-    initial = tf.constant(0.001, shape=shape)
-    return tf.Variable(initial, name=var_name)
+def bias_variable(tensor_shape, var_name='b'):
+    initial = tf.constant(value=0.0, shape=tensor_shape)
+    return tf.Variable(initial_value=initial, name=var_name)
 
-def conv2d(x, W, var_name = 'W'):
+
+def conv2d(x, W, var_name = 'conv'):
     return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME',
                         name=var_name)
@@ -31,6 +42,7 @@ def placeholder_inputs(mode = 'gt'):
     if mode == 'no_gt':
         print 'No placeholder for ground truth'
         return (x, keep_prob)
+
 
 def placeholder_inputs_ans(total_vocab_size, ans_vocab_size, mode='gt'):
     image_regions = tf.placeholder(tf.float32, shape=[None,25,25,3])
@@ -49,50 +61,89 @@ def placeholder_inputs_ans(total_vocab_size, ans_vocab_size, mode='gt'):
 
 def obj_comp_graph(x, keep_prob):
     with tf.name_scope('obj') as obj_graph:
+
         with tf.name_scope('conv1') as conv1:
+
             W_conv1 = weight_variable([5,5,3,4])
             b_conv1 = bias_variable([4])
-            h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1, name='h')
+            a_conv1 = tf.add(conv2d(x, W_conv1), b_conv1, name='a')
+            h_conv1 = tf.nn.relu(a_conv1, name='h')
             h_pool1 = max_pool_2x2(h_conv1)
             h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob, name='h_pool_drop')
+
         with tf.name_scope('conv2') as conv2:
+
             W_conv2 = weight_variable([3,3,4,8])
             b_conv2 = bias_variable([8])
-            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='h')
+            a_conv2 = tf.add(conv2d(h_pool1, W_conv2), b_conv2, name='a')
+            h_conv2 = tf.nn.relu(a_conv2, name='h')
             h_pool2 = max_pool_2x2(h_conv2)
             h_pool2_drop = tf.nn.dropout(h_pool2, keep_prob, name='h_pool_drop')
-            h_pool2_drop_flat = tf.reshape(h_pool2_drop, [-1, 392], name='h_pool_drop_flat')
+            h_pool2_drop_shape = h_pool2_drop.get_shape()
+            obj_feat_dim = reduce(lambda f, g: f*g,
+                                  [dim.value for dim in h_pool2_drop_shape[1:]])
+            obj_feat = tf.reshape(h_pool2_drop, [-1, obj_feat_dim],
+                                  name='obj_feat')
+
+        print('Object feature dimension: ' + str(obj_feat_dim)) #392
+
         with tf.name_scope('fc1') as fc1:
-            W_fc1 = weight_variable([392, 4])
+
+            W_fc1 = weight_variable([obj_feat_dim, graph_config['num_objects']])
             b_fc1 = bias_variable([4])
-            y_pred = tf.nn.softmax(tf.matmul(h_pool2_drop_flat, W_fc1) + b_fc1)
-        tf.add_to_collection('obj_feat', h_pool2_drop_flat)
+            logits = tf.add(tf.matmul(obj_feat, W_fc1), b_fc1, name='logits')
+
+        y_pred = tf.nn.softmax(logits, name='softmax')
+#        tf.add_to_collection('obj_feat', h_pool2_drop_flat)
         return y_pred
 
 def atr_comp_graph(x, keep_prob, obj_feat):
     with tf.name_scope('atr') as obj_graph:
+
         with tf.name_scope('conv1') as conv1:
+
             W_conv1 = weight_variable([5,5,3,4])
             b_conv1 = bias_variable([4])
-            h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1, name='h')
+            a_conv1 = tf.add(conv2d(x, W_conv1), b_conv1, name='a')
+            h_conv1 = tf.nn.relu(a_conv1, name='h')
             h_pool1 = max_pool_2x2(h_conv1)
             h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob, name='h_pool_drop')
+
        with tf.name_scope('conv2') as conv2:
+
             W_conv2 = weight_variable([3,3,4,8])
             b_conv2 = bias_variable([8])
-            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='h')
+            a_conv2 = tf.add(conv2d(h_pool1, W_conv2), b_conv2, name='a')
+            h_conv2 = tf.nn.relu(a_conv2, name='h')
             h_pool2 = max_pool_2x2(h_conv2)
             h_pool2_drop = tf.nn.dropout(h_pool2, keep_prob, name='h_pool_drop')
-            h_pool2_drop_flat = tf.reshape(h_pool2_drop, [-1, 392], name='h_pool_drop_flat')
+            h_pool2_drop_shape = h_pool2_drop.get_shape()
+            atr_feat_dim = reduce(lambda f, g: f*g,
+                                  [dim.value for dim in h_pool2_drop_shape[1:]])
+            atr_feat = tf.reshape(h_pool2_drop, [-1, atr_feat_dim],
+                                  name='atr_feat')
+
+        print('Attribute feature dimension: ' + str(atr_feat_dim)) #392
+
         with tf.name_scope('fc1') as fc1:
-            W_obj_fc1 = weight_variable([392, 4], var_name='W_obj')
-            W_atr_fc1 = weight_variable([392, 4], var_name='W_atr')
+
+            W_obj_fc1 = weight_variable([graph_config['obj_feat_dim'],
+                                         graph_config['num_attributes']],
+                                        var_name='W_obj')
+            W_atr_fc1 = weight_variable([atr_feat_dim,
+                                         graph_config['num_attributes']],
+                                        var_name='W_atr')
             b_fc1 = bias_variable([4])
-            y_pred = tf.nn.softmax(tf.matmul(h_pool2_drop_flat, W_atr_fc1) + tf.matmul(obj_feat, W_obj_fc1) + b_fc1)
-        tf.add_to_collection('atr_feat', h_pool2_drop_flat)
+            logits_atr = tf.matmul(atr_feat, W_atr_fc1, name='logits_atr')
+            logits_obj = tf.matmul(obj_feat, W_obj_fc1, name='logits_obj')
+            logits = 0.5*logits_atr + 0.5*logits_obj + b_fc1
+
+        y_pred = tf.nn.softmax(logits, name='softmax')
+#        tf.add_to_collection('atr_feat', h_pool2_drop_flat)
         return y_pred
 
+
 def ans_comp_graph(image_regions, questions, keep_prob, \
                    obj_feat, atr_feat, vocab, inv_vocab, ans_vocab_size):
     with tf.name_scope('ans') as ans_graph:
@@ -104,7 +155,7 @@ def ans_comp_graph(image_regions, questions, keep_prob, \
         with tf.name_scope('q_embed') as q_embed:
             q_feat = tf.matmul(questions, word_vecs)
 #            q_feat = tf.truediv(q_feat, tf.cast(len(vocab),tf.float32))
-            q_feat = tf.truediv(q_feat, tf.reduce_sum(questions,1,keep_dims=True))
+#            q_feat = tf.truediv(q_feat, tf.reduce_sum(questions,1,keep_dims=True))
 
         with tf.name_scope('conv1') as conv1:
             W_conv1 = weight_variable([5,5,3,4])
@@ -125,9 +176,9 @@ def ans_comp_graph(image_regions, questions, keep_prob, \
             fc1_dim = 300
             W_region_fc1 = weight_variable([392, fc1_dim], var_name='W_region')
             W_obj_fc1 = weight_variable([392, fc1_dim], var_name='W_obj',
-                                        std=math.sqrt(3/(392+ans_vocab_size)))
+                                        std=math.sqrt(3.0/(2.0*392.0+50.0+ans_vocab_size)))
             W_atr_fc1 = weight_variable([392, fc1_dim], var_name='W_atr',
-                                        std=math.sqrt(3/(392+ans_vocab_size)))
+                                        std=math.sqrt(3.0/(2.0*392.0+50.0+ans_vocab_size)))
             W_q_fc1 = weight_variable([50, fc1_dim], var_name='W_q',
                                       std=math.sqrt(3.0/(50.0+ans_vocab_size)))
             b_fc1 = bias_variable([fc1_dim])
@@ -135,7 +186,6 @@ def ans_comp_graph(image_regions, questions, keep_prob, \
             h_tmp = tf.matmul(q_feat, W_q_fc1) + b_fc1 + \
                     tf.matmul(obj_feat, W_obj_fc1) + \
                     tf.matmul(atr_feat, W_atr_fc1)
-                    #tf.matmul(h_pool2_drop_flat, W_region_fc1) + \
 
             h_fc1 = tf.nn.relu(h_tmp, name='h')
 
@@ -144,6 +194,7 @@ def ans_comp_graph(image_regions, questions, keep_prob, \
         with tf.name_scope('fc2') as fc2:
             W_fc2 = weight_variable([fc1_dim, ans_vocab_size],
                                     std=math.sqrt(3.0/(fc1_dim)))
+
             b_fc2 = bias_variable([ans_vocab_size])
             logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
 
diff --git a/classifiers/tf_graph_creation_helper.pyc b/classifiers/tf_graph_creation_helper.pyc
index 4defda24febbdc5564e8f0d5b4820a16d9ee72e0..9b27a5400c59cc016fa82b56436b143641f67169 100644
Binary files a/classifiers/tf_graph_creation_helper.pyc and b/classifiers/tf_graph_creation_helper.pyc differ
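# Illustrative sketch of the initialization math in the new weight_variable
# above. For a conv kernel shaped [5, 5, 3, 4], fan_in defaults to 5*5*3 = 75
# (the product of all dimensions except the output channels). Note that
# stddev = sqrt(2/fan_in) is the He (2015) scheme for ReLU layers; classic
# Glorot (2010) initialization, which the commit subject names, would use
# stddev = sqrt(2/(fan_in + fan_out)).
import math
import numpy as np

def init_stddev(tensor_shape, fan_in=None):
    if fan_in is None:
        fan_in = reduce(lambda x, y: x * y, tensor_shape[0:-1])
    return math.sqrt(2.0 / fan_in)

W = np.random.randn(5, 5, 3, 4) * init_stddev([5, 5, 3, 4])  # stddev ~= 0.163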
diff --git a/classifiers/train_classifiers.py b/classifiers/train_classifiers.py
index 2600262..2fa365e 100644
--- a/classifiers/train_classifiers.py
+++ b/classifiers/train_classifiers.py
@@ -14,16 +14,16 @@ workflow = {
     'train_obj': False,
     'eval_obj': False,
     'train_atr': False,
-    'eval_atr': False,
+    'eval_atr': True,
 }
 
 obj_classifier_train_params = {
     'out_dir': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier',
-    'adam_lr': 0.001,
+    'adam_lr': 0.0001,
     'train_json': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/train_anno.json',
     'image_dir': '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images',
-#    'mean_image': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/mean_image.npy',
-    'mean_image': '',
+    'mean_image': '/home/tanmay/Code/GenVQA/Exp_Results/Obj_Classifier/mean_image.npy',
+#    'mean_image': '',
 }
 
 obj_classifier_eval_params = {
-- 
GitLab