From 6832563e93049abe46eddf2f2ca1e226e4bbfb4f Mon Sep 17 00:00:00 2001
From: tgupta6 <tgupta6@illinois.edu>
Date: Mon, 21 Mar 2016 17:07:46 -0500
Subject: [PATCH] Mini batch training with fixed region proposals for ans
 network
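
Use a fixed set of 22 region proposals per image instead of the
per-image ground-truth regions. The mini-batch loader now also returns
a region score vector and a partition index that maps every proposal to
its question, the answer graph combines the per-region predictions into
a single prediction per question (aggregate_y_pred), and the perfect
ranker scores each fixed proposal against the ground-truth regions,
falling back to a uniform score when none match. Training now uses
dropout (keep_prob 0.5) and 9500 iterations per epoch; validation and
plotting are temporarily commented out.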

---
 .../answer_classifier/ans_data_io_helper.py   |  63 +++++++---
 .../answer_classifier/ans_data_io_helper.pyc  | Bin 4181 -> 5192 bytes
 .../answer_classifier/train_ans_classifier.py | 119 +++++++++++++-----
 classifiers/region_ranker/perfect_ranker.py   |  97 ++++++++++----
 classifiers/region_ranker/perfect_ranker.pyc  | Bin 1767 -> 2750 bytes
 classifiers/tf_graph_creation_helper.py       |  22 +++-
 classifiers/tf_graph_creation_helper.pyc      | Bin 7351 -> 7719 bytes
 7 files changed, 223 insertions(+), 78 deletions(-)

diff --git a/classifiers/answer_classifier/ans_data_io_helper.py b/classifiers/answer_classifier/ans_data_io_helper.py
index 71b3760..5898855 100644
--- a/classifiers/answer_classifier/ans_data_io_helper.py
+++ b/classifiers/answer_classifier/ans_data_io_helper.py
@@ -12,6 +12,9 @@ import region_ranker.perfect_ranker as region_proposer
 
 qa_tuple = namedtuple('qa_tuple','image_id question answer')
 
+num_proposals = 22
+region_coords = region_proposer.get_region_coords()
+
 
 def create_ans_dict():
     ans_dict = {
@@ -67,39 +70,57 @@ def ans_mini_batch_loader(qa_dict, region_anno_dict, ans_dict, vocab,
                           image_dir, mean_image, start_index, batch_size, 
                           img_height=100, img_width=100, channels = 3):
     
-    # compute the number of proposals 
-    count = 0;
+    ans_labels = np.zeros(shape=[batch_size, len(ans_dict)])
     for i in xrange(start_index, start_index + batch_size):
-        count = count + len(region_anno_dict[qa_dict[i].image_id])
+        answer = qa_dict[i].answer
+        ans_labels[i-start_index, ans_dict[answer]] = 1
+    
+    # number of regions in the batch
+    count = batch_size * num_proposals
 
-    region_images = np.empty(shape=[count, img_height, 
+    region_images = np.zeros(shape=[count, img_height, 
                                     img_width, channels])
-    
-    ans_labels = np.zeros(shape=[count, len(ans_dict)])
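+    # region_score: one score per proposal row; partition: index of the question
+    # in the batch that each proposal row belongs to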
+    region_score = np.zeros(shape=[1,count])
+    partition = np.zeros(shape=[count])
     question_encodings = np.zeros(shape=[count, len(vocab)])
-
-    counter = 0
+        
     for i in xrange(start_index, start_index + batch_size):
         image_id = qa_dict[i].image_id
         question = qa_dict[i].question
         answer = qa_dict[i].answer
-        region_coords = region_anno_dict[image_id]
-        image = mpimg.imread(os.path.join(image_dir, str(image_id) + '.jpg'))
-        regions = region_proposer.rank_regions(image, question, region_coords)
-        for _, proposal in regions.items():
-            resized_region = misc.imresize(proposal.image, \
+        gt_regions_for_image = region_anno_dict[image_id]
+        image = mpimg.imread(os.path.join(image_dir,
+                                          str(image_id) + '.jpg'))
+        regions = region_proposer.rank_regions(image, question, region_coords,
+                                               gt_regions_for_image)
+        for j in xrange(num_proposals):
+            counter = j + (i-start_index)*num_proposals
+            resized_region = misc.imresize(regions[j].image, \
                                            (img_height, img_width))
-            region_images[counter,:,:,:] = (resized_region / 254.0) - mean_image
-            ans_labels[counter, ans_dict[answer]] = 1
+            region_images[counter,:,:,:] = (resized_region / 254.0) \
+                                           - mean_image
+            region_score[0,counter] = regions[j].score
+            partition[counter] = i-start_index
 
             for word in question[0:-1].split():
                 if word not in vocab:
                     word = 'unk'
                 question_encodings[counter, vocab[word]] += 1 
 
-            counter = counter + 1
+    # Check for nans, infs
+    assert (not np.any(np.isnan(region_images))), "NaN in region_images"
+    assert (not np.any(np.isnan(ans_labels))), "NaN in labels"
+    assert (not np.any(np.isnan(question_encodings))), "NaN in question_encodings"
+    assert (not np.any(np.isnan(region_score))), "NaN in region_score"
+    assert (not np.any(np.isnan(partition))), "NaN in partition"
+
+    assert (not np.any(np.isinf(region_images))), "Inf in region_images"
+    assert (not np.any(np.isinf(ans_labels))), "Inf in labels"
+    assert (not np.any(np.isinf(question_encodings))), "Inf in question_encodings"
+    assert (not np.any(np.isinf(region_score))), "Inf in region_score"
+    assert (not np.any(np.isinf(partition))), "Inf in partition"
 
-    return region_images, ans_labels, question_encodings
+    return region_images, ans_labels, question_encodings, region_score, partition
 
     
 if __name__=='__main__':
@@ -112,6 +133,8 @@ if __name__=='__main__':
     image_dir = '/home/tanmay/Code/GenVQA/GenVQA/' + \
                 'shapes_dataset/images'
 
+    mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/' + \
+                         'Obj_Classifier/mean_image.npy')
 
     qa_anno_dict = parse_qa_anno(train_anno_filename)
     region_anno_dict = region_proposer.parse_region_anno(region_anno_filename)
@@ -119,11 +142,13 @@ if __name__=='__main__':
     vocab, _ = get_vocab(qa_anno_dict)
     
     
-    region_images, ans_labels, question_encodings = \
+    region_images, ans_labels, question_encodings, score, partition = \
         ans_mini_batch_loader(qa_anno_dict, region_anno_dict, ans_dict, vocab,
-                              image_dir, None, 1, 2, 25, 25, 3)
+                              image_dir, mean_image, 1, 1, 25, 25, 3)
 
     print(ans_labels.shape)
     print(question_encodings.shape)
     print(region_images.shape)
+    print(score)
+    print(partition)
     
diff --git a/classifiers/answer_classifier/ans_data_io_helper.pyc b/classifiers/answer_classifier/ans_data_io_helper.pyc
index d03abbe8c016b6f922bcc5fb29c71faa48cdc2b8..6eda2294ef5263df1f1e80c298cf9d93111a0b99 100644
GIT binary patch
delta 2361
zcmZ`)-ESL35dZC+6UX-XBX$zUNgC3RG;Y!lN~tPAK!G-*K-Hq7CCF`Mxw(z8>ztjt
z3rf*hmPp|ZA#_4WNR(G3BqZn)5`qdOgv1*X4?OY&F9-yG0L-i}c9Rx+JvTe^+nL|Y
z?w)_1`Jp(e|C5aWarLiPRuO&-dM?q={o=jJJDt)fWDfN)Yy+h{tSD}iG7f7<DFs+D
zrA)wztC4_Z;I@v^B;*vVByMVOJ`+|7RvP)zKCqS{ZCYu=q&)%|M|}oX7FG`VZGqB$
z$O7a(ba-+=Q8)-W4W1r?9A`S$G?ZphIt)1hd4OGPnNt=@POAtx3poc#8TP{(QDYPq
zjRdw6xHf=N5v3z2&7-t{(ox7Gs2{^~=jD9&IVp>wVS%kjN!WlT@N#i1_()tCsEtvp
z?*1*l7vezhrG7``?!FrNREW8|k-@KpcqaHR`mHz>yghWXGeyW4{myc8f6@UDs0$9}
zN8xGkHLjsb5~7{J))G7&{-7FBYKw+**B8(>-~+yaLOp_Z5*95?@SKq26v9uUu%=;g
z<1wso+yRWVQ=F<3=1RdIf**q)M>V2yrTa`cT55+wrf+AXP~hQRMy9RbM>~tH*TsXh
zE23?j?C~B%JJ*{;(H`kdR7E~DB!@zKH26H0@8meTrK|c$YmdQ8p`M0627d%z2L33v
zX<Z&$zj4E;V1k{y{tMdU@M&~A!+prN3-E?f&%)2Mt57Ezsu$ESYHxc2KAqzTM*RWm
zCFLlfW6^d4Z)q&|C~FZc?nI&*GOz}C33q>IKZe<`xb*^aC9gGz{dM9CB6u3@N%%B$
z+>FAR)97xIg^rg`1Pg2@Z-+N?RYQSx=8V3$VQ4EG4Y9KEJ68DkP&Y_YHEFJzhT%Gz
zr>-YW-W1B+1JR)mK_4hkjOTkn*&gUC0(JOOKLnNOp?+1NROr2-hI^nvok<xFM4~eL
z%oh|WxeL_c`Uush{*pqacA-L0pZcZ(snEMfQKUZe4;4tw_kw!NJMK3Mr3{4PQ$g2_
zc8E^iFxCd>O^~GveUT0rUF+8>Em;$+MMxa4x2jco0{5CrHGbCqrH^@tG<Sr=#mdE*
znmyylYOQIPYwMM&blqg%(WrE8$cF0{`lgLd>3WQo_G;6r*;Uug^}S&vceUwAH`BMb
zDvnp<Wh(9S_S(bKhK@$1m$eg`{wMA3Nd2^-y_dE)L0sr%0oX0osW+t4bm_VR4QYFW
z^xSePc2%0=4CEHro9pG4lWMk_Zl&Rx{GK)Q!TH41ta68yt~Q&F<p!T5rk1)pz?GY`
z9E5{c>|4rQbM1;vFAm@t*Ok<ZroHSqO(%5L?6u-l@MGegNiVKex#eyTOx|0?$t(tC
zW30pfj`ug&<2QP*OpoCYwkjk~K96_3>Xo}8-11t}>2ACE6nmp2LYK#<sIDxl%}v`Q
z(nLS_)3B~BlgGVG0+DGkA(CQBk85d>7UN=6Olxf6Z(L->7+GT?M_Puos5m0>B1H&;
ztcUek5q`Cv7QCB$Uvp=JkCRhREN`{SrgS$Oo_oUGN9iuzsF%++Dy~~wt4Zg?x~znU
z_PE`;rH*}#Pipaaa5{B)lq)hHC!v~5+u6Eho(S%w{6|l*ahOC^?zE!Z-AMCEUc^l@
z53)GM;z}@|el~HARsIh(mxJ~6!u(lQIf&|1*_`9~1r{%|xWwXV78iqi=_?n`lWI`i
u2K6DWo*C|mp4JA1p~psIMr<T|#t^!QYCFG}h-+PZHj)<e+K}MyfcOu+rQd-7

delta 1370
zcmZWoON$&;6#i~izhBkw=VTtCCsUwE0#O&XA;g#o3bUxFgUy7Y?Ye2ZGE>#vRRcXz
zMK3xoU1;GpTS381?<Hg*x)Bll7lJ}?8T6c*VHimDz2|=CeeSuZf6@Qk?5HpDnLi4D
ze=@}Rm!tDGZ8!M3^K|h9=+|H-!JrN^6@doKbhI)svv{JS--K+z%;B+u*{Lw|Fbin(
z+hC*->O`nWXbrLs*~U_!e+9AvYaOx^u^W&zEP(?B{Y~_@AQuhDE39MD)ks8{$c;=l
zA+JKN!4xn{Fv~D2(aK<%#R1jSMSmOpYmnQpcCdS2UOPX#XQvJJeV|wAw&VZk+tUR>
zFGl2e><)<w{}S)4URQn=Vk3N^J{85)H?bdt=&o)heiGtV_%iuiTo0e6ZY(<Fl&1Y2
zZ8xh#p@S^uZ@~lnM6{A<EEU{+xP>4GKMPO5r$zc4T9$$qU9>??(>C@!6;=$U!ghWN
zej0uTqZqYE1bMJFey-&x%7U9TQX+NX6}5UvtMR3SBIXC;)yj4SS9VcQP`Y3e2+9{s
z5<w*@y@Xa!g_lCu%~Td8PGhRkEeQoRcm-G*{2F|6u41VosAK*+{5q^6KlR;*&-rbG
z27)HM5-hT7!lLNWogdMSNZW!>(-lL_kE7}5;TK_2hk-AZOCbC;lUtZ^_ysobsdW{!
zC_-=k0q5sANnp#OGC1#>2-@&zsd${k>^DI}P7)dlpDHGQm9^|Yq?e(vZ>zoOO6+s)
zI6YGl)0<*{y23seL@6~qQn7fTr)UE9#zf%7c^Ez)Or*;*^@cSWjaZIn&h0UeS~PV=
zmyli~;w6a8X9IgA>4AYGX&ZHx(>TGNMB(J|#5;}97jovfMuSvFlZ`mpad@v_#El}s
z@cY7A(%2@Xcfy4BRnpK28|Cmvt-i<*9N6yR(cm+A<a%Xdrzg_&#*Td`?V)3i?UCyx
zNj!8;Y){UjIxZ#4lWHbMY(I3Indurml0+V7Vwc7eIHb`4CNpQ^xPv2_2KI8t;r-%a
z?+!8UO(KXVL|beqn$VPWv92(ekA4}Ui<)SOoY2H|QB~T6(qvH%)zY`6vleo1#w{Yc
z9{yAMsK#{|95!l*eR4cGHEx8Tmi_SGvL^Pzc;ycLagEAr`nzmdV)7xA9rodO+H-7a
vyw9S0Om@TXDj$np__EU6QPb6QE?q5e=Y%Sf%K28gK24Nkn&>JikrMv{)ItEI

diff --git a/classifiers/answer_classifier/train_ans_classifier.py b/classifiers/answer_classifier/train_ans_classifier.py
index 10d7f9a..6d27300 100644
--- a/classifiers/answer_classifier/train_ans_classifier.py
+++ b/classifiers/answer_classifier/train_ans_classifier.py
@@ -12,6 +12,39 @@ import plot_helper as plotter
 import ans_data_io_helper as ans_io_helper
 import region_ranker.perfect_ranker as region_proposer 
 
+val_start_id = 106115
+val_batch_size = 1000
+
+batch_size = 10
+
+def evaluate(accuracy, qa_anno_dict, region_anno_dict, ans_vocab, vocab,
+             image_dir, mean_image, start_index, batch_size,
+             placeholders, img_height=100, img_width=100):
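+    # Evaluates the accuracy op one example at a time over
+    # [start_index, start_index + batch_size) and returns the mean accuracy.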
+    
+    correct = 0
+    for i in xrange(start_index, start_index + batch_size):
+        region_images, ans_labels, questions, \
+        region_score, partition = \
+            ans_io_helper.ans_mini_batch_loader(qa_anno_dict, 
+                                                region_anno_dict, 
+                                                ans_vocab, vocab, 
+                                                image_dir, mean_image, 
+                                                i, 1, 
+                                                img_height, img_width, 3)
+            
+        feed_dict = {
+            placeholders[0] : region_images, 
+            placeholders[1] : questions,
+            placeholders[2] : 1.0,
+            placeholders[3] : ans_labels,        
+            placeholders[4] : region_score,
+        }
+
+        correct = correct + accuracy.eval(feed_dict)
+
+    return correct/batch_size
+
+
 def train(train_params):
     sess = tf.InteractiveSession()
     
@@ -34,7 +67,7 @@ def train(train_params):
     vocab, inv_vocab = ans_io_helper.get_vocab(qa_anno_dict)
 
     # Create graph
-    image_regions, questions, keep_prob, y = \
+    image_regions, questions, keep_prob, y, region_score= \
         graph_creator.placeholder_inputs_ans(len(vocab), len(ans_vocab), 
                                              mode='gt')
     y_pred_obj = graph_creator.obj_comp_graph(image_regions, keep_prob)
@@ -49,13 +82,19 @@ def train(train_params):
     obj_atr_saver.restore(sess, model_to_restore)
 
     y_pred = graph_creator.ans_comp_graph(image_regions, questions, keep_prob, \
-                             obj_feat[0], atr_feat[0], 
-                             vocab, inv_vocab, len(ans_vocab))
-    cross_entropy = graph_creator.loss(y, y_pred)
-    accuracy = graph_creator.evaluation(y, y_pred)
+                                          obj_feat[0], atr_feat[0], 
+                                          vocab, inv_vocab, len(ans_vocab))
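+    # y_pred holds one answer distribution per region; collapse them to one
+    # prediction per question via a score-weighted sum over its proposals.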
+    y_avg = graph_creator.aggregate_y_pred(y_pred, region_score, batch_size,
+                                           ans_io_helper.num_proposals, 
+                                           len(ans_vocab))
+#   y_avg = tf.matmul(region_score,y_pred)
+    
+    cross_entropy = graph_creator.loss(y, y_avg)
+    accuracy = graph_creator.evaluation(y, y_avg)
     
     # Collect variables
     vars_to_opt = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ans')
+    
     train_step = tf.train.AdamOptimizer(train_params['adam_lr']) \
                          .minimize(cross_entropy, var_list=vars_to_opt)
     
@@ -78,56 +117,72 @@ def train(train_params):
     mean_image = np.load('/home/tanmay/Code/GenVQA/Exp_Results/' + \
                          'Obj_Classifier/mean_image.npy')
 
-    # Val data
-    val_region_images, val_ans_labels, val_questions = \
-        ans_io_helper.ans_mini_batch_loader(qa_anno_dict, region_anno_dict, 
-                                            ans_vocab, vocab, image_dir, 
-                                            mean_image, 9501, 499, 
-                                            25, 25, 3)
-    feed_dict_val = {
-        image_regions : val_region_images, 
-        questions: val_questions,
-        keep_prob: 1.0,
-        y: val_ans_labels,        
-    }
-
+    placeholders = [image_regions, questions, keep_prob, y, region_score]
     
     # Start Training
-    batch_size = 10
+#    batch_size = 1
     max_epoch = 10
-    max_iter = 950
+    max_iter = 9500
     val_acc_array_epoch = np.zeros([max_epoch])
     train_acc_array_epoch = np.zeros([max_epoch])
     for epoch in range(max_epoch):
         for i in range(max_iter):
             if i%100==0:
                 print('Iter: ' + str(i))
-                print('Val Acc: ' + str(accuracy.eval(feed_dict_val)))
 
-            train_region_images, train_ans_labels, train_questions = \
+                # val_accuracy = evaluate(accuracy, qa_anno_dict, 
+                #                         region_anno_dict, ans_vocab, vocab,
+                #                         image_dir, mean_image, 
+                #                         val_start_id, val_batch_size,
+                #                         placeholders, 25, 25)
+                # print(val_accuracy)
+
+            train_region_images, train_ans_labels, train_questions, \
+            train_region_score, train_partition = \
             ans_io_helper.ans_mini_batch_loader(qa_anno_dict, region_anno_dict, 
                                                 ans_vocab, vocab, image_dir, 
                                                 mean_image, 1+i*batch_size, 
                                                 batch_size, 25, 25, 3)
+    
+            
             feed_dict_train = {
                 image_regions : train_region_images, 
                 questions: train_questions,
-                keep_prob: 1.0,
+                keep_prob: 0.5,
                 y: train_ans_labels,        
+                region_score: train_region_score,
             }
-            _, current_train_batch_acc = sess.run([train_step, accuracy], 
-                                                  feed_dict=feed_dict_train)
+            
+            tf.shape(y_pred)
+
+            q_feat = tf.get_collection('q_feat', scope='ans/q_embed')
+            _,current_train_batch_acc,q_feat_eval = \
+                sess.run([train_step, accuracy, q_feat[0]],
+                         feed_dict=feed_dict_train)
+            
+#            print(q_feat_eval)
+ #           print(q_feat_eval.shape)
+#            print(i)
+#            print(train_questions)
+#            print(train_ans_labels)
+#            print(train_region_score)
+
             train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] + \
                                            current_train_batch_acc
 
         train_acc_array_epoch[epoch] = train_acc_array_epoch[epoch] / max_iter
-        val_acc_array_epoch[epoch] = accuracy.eval(feed_dict_val)
-        plotter.plot_accuracies(xdata=np.arange(0, epoch + 1) + 1, 
-                                ydata_train=train_acc_array_epoch[0:epoch + 1], 
-                                ydata_val=val_acc_array_epoch[0:epoch + 1], 
-                                xlim=[1, max_epoch], ylim=[0, 1.0], 
-                                savePath=os.path.join(outdir, 
-                                                      'acc_vs_epoch.pdf'))
+        # val_accuracy = evaluate(accuracy, qa_anno_dict, 
+        #                         region_anno_dict, ans_vocab, vocab,
+        #                         image_dir, mean_image, 9501, 499, 
+        #                         placeholders, 25, 25)
+        # val_acc_array_epoch[epoch] = val_accuracy
+        # print(val_accuracy)
+        # plotter.plot_accuracies(xdata=np.arange(0, epoch + 1) + 1, 
+        #                         ydata_train=train_acc_array_epoch[0:epoch + 1], 
+        #                         ydata_val=val_acc_array_epoch[0:epoch + 1], 
+        #                         xlim=[1, max_epoch], ylim=[0, 1.0], 
+        #                         savePath=os.path.join(outdir, 
+        #                                               'acc_vs_epoch.pdf'))
         save_path = saver.save(sess, os.path.join(outdir, 'ans_classifier'), 
                                global_step=epoch)
 
diff --git a/classifiers/region_ranker/perfect_ranker.py b/classifiers/region_ranker/perfect_ranker.py
index 737dfad..a26d8e1 100644
--- a/classifiers/region_ranker/perfect_ranker.py
+++ b/classifiers/region_ranker/perfect_ranker.py
@@ -7,6 +7,7 @@ import matplotlib.image as mpimg
 from scipy import misc
 region = namedtuple('region','image score coord')
 
+
 def parse_region_anno(json_filename):
     with open(json_filename,'r') as json_file:
         raw_data = json.load(json_file)
@@ -16,24 +17,66 @@ def parse_region_anno(json_filename):
         region_anno_dict[entry['image_id']] = entry['regions']
         
     return region_anno_dict
-        
-        
-def rank_regions(image, question, region_coords):
-    regions = dict()
-    count = 1;
-    for key in region_coords:
-        x1, y1, x2, y2 = region_coords[key]
-        cropped_image = image[y1-1:y2, x1-1:x2, :]
 
-        if key in question:
-            score = 1
-        else:
-            score = 0
 
+def get_region_coords():
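+    # 22 fixed proposals on a 300x300 image: the nine 100x100 cells of a
+    # 3x3 grid, the twelve two-cell merges of adjacent cells, and the full
+    # image.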
+    region_coords = np.array([[  1,   1, 100, 100],
+                              [101,   1, 200, 100],
+                              [201,   1, 300, 100],
+                              [  1, 101, 100, 200],
+                              [101, 101, 200, 200],
+                              [201, 101, 300, 200],
+                              [  1, 201, 100, 300],
+                              [101, 201, 200, 300],
+                              [201, 201, 300, 300],
+                              [  1,   1, 100, 200],
+                              [101,   1, 200, 200],
+                              [201,   1, 300, 200],
+                              [  1, 101, 100, 300],
+                              [101, 101, 200, 300],
+                              [201, 101, 300, 300],
+                              [  1,   1, 200, 100],
+                              [101,   1, 300, 100],
+                              [  1, 101, 200, 200],
+                              [101, 101, 300, 200],
+                              [  1, 201, 200, 300],
+                              [101, 201, 300, 300],
+                              [  1,   1, 300, 300]])
+    return region_coords
+
+def rank_regions(image, question, region_coords, gt_regions_for_image):
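+    # Crop every fixed proposal; a proposal scores 1 if its coordinates exactly
+    # match a ground-truth region of the image, otherwise 0. If no proposal
+    # matches, all proposals fall back to a uniform score of 1/num_regions.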
+
+    num_regions, _ = region_coords.shape
+    regions = dict()
+    
+    count = 0
+    no_region_flag = True
+    for i in xrange(num_regions):
+        x1 = region_coords[i,0]
+        y1 = region_coords[i,1]
+        x2 = region_coords[i,2]
+        y2 = region_coords[i,3]
+    
+        cropped_image = image[y1-1:y2, x1-1:x2, :]
+        score = 0
+        
+        for gt_region in gt_regions_for_image:
+            x1_, y1_, x2_, y2_ = gt_regions_for_image[gt_region]
+            if x1==x1_ and x2==x2_ and y1==y1_ and y2==y2_:
+                score = 1
+                no_region_flag = False
+                break
+        
         regions[count] = region(image=cropped_image, score=score, 
-                                coord=region_coords[key])
+                                coord=region_coords[i,:])
         count = count + 1
 
+    if no_region_flag:
+        for i in xrange(num_regions):
+            regions[i] = region(image=regions[i].image, score=1.0/num_regions, 
+                                coord=regions[i].coord)
+    
+
     return regions
 
 
@@ -41,13 +84,25 @@ if __name__=='__main__':
     image_dir = '/home/tanmay/Code/GenVQA/GenVQA/shapes_dataset/images/'
 
     json_filename = os.path.join('/home/tanmay/Code/GenVQA/GenVQA/',
-                                 'shapes_dataset/regions_anno.json')
-    region_anno_dict = parse_region_anno(json_filename)
+                                 'shapes_dataset/train_anno.json')
+    # region_anno_dict = parse_region_anno(json_filename)
 
-    image_id = 1
-    question = 'Is there a blue triangle?'
-    region_coords = region_anno_dict[image_id]
-    image = mpimg.imread(os.path.join(image_dir, str(image_id) + '.jpg'))
-    regions = rank_regions(image, question, region_coords)
-    print(regions)
+    # image_id = 1
+    # question = 'Is there a blue triangle?'
+    # region_coords = region_anno_dict[image_id]
+    # image = mpimg.imread(os.path.join(image_dir, str(image_id) + '.jpg'))
+    # regions = rank_regions(image, question, region_coords)
+    # print(regions)
+#    count = 0
+    # for i in xrange(14999):
+    #     count = count + len(region_anno_dict[i+1])
+    # print(count)
+    # print(len(region_anno_dict[1]))
 
+    with open(json_filename,'r') as json_file:
+        raw_data = json.load(json_file)
+        
+        
+    for key in raw_data:
+        if key['image_id']==9999:
+            print(key['question_id'])
diff --git a/classifiers/region_ranker/perfect_ranker.pyc b/classifiers/region_ranker/perfect_ranker.pyc
index 04058d905a9d6519d32eafa0d15668d4dd4f9191..ac7c5aca66999387ddddd8c1307cd88b3c37391b 100644
GIT binary patch
literal 2750
zcmbVO&2HO95S}G%S+XU||FM#$F^ZzmA&D9vnv2mNBxrIn8oG!JAqK&UYlR9$(&QSI
z0P5uS+^6WF$Gk=R7<q#LeSr3xS<w~ROInE>&g>3nXJ@~eq5DT`wX5H~7|{Gz!}Ckr
z#kwL2aVd(4`q#XozN*F@>bt@_6fe<unflA5=I_*Kv_k!QrP`p;D)rYybSZAqxJCUI
zDZNbL5{1ij1!L<J*62s9)~0Ypm<<Z+!fcY>qOd`)9m-c#2wU5v6^(Xiv`ZoEHpRjo
z=^Lc?C~UF#^B(p0NfV7ar1$9xzTc!%?CAM-ll!D^(v!1~sDF#}EehAE6Sl?tTah=Y
z^Yq!pTf%jkNElAKn}|#FCw|9W1c%HjND_>7XfCr@+p#5#jiX{<IFPv>Mrl&Wd*fiJ
z4~jvW>w`g>=HY-%EaPUy$GD4A5JObTYS#UkB7<EzR4R6V=D>Y|`^Kd!qM1vR11cTt
zW1nVAR3efNbxKz_mpVrmkLVQQNtfL^hAH@DR+w7POdWj0uK7_YN`^JlOeeyOiqtZ3
z8iX<uMg!Ajw@nL#=l%02)*J@YSR4`4dGLcD1|~2Yn6hj7L6W3Cn=xz#3+2-;7!p$4
z`CBm$lGi%#Wja6C1LIrqK{hoU&n(Cb?Jt;I!(h<_A+@D;orY?uJx5^8yS|BAf_w%E
zK^_6b_HsC8LqrZx#)yQFMF10Gt(pZ71t>37$`GMuMJPjrvK66>5h&LxWeamFg2x4A
zD?%C9qGgDSmLV=$wr*Towr*Towr(t1zTOe0tSP%+@b=xs9W5@j=rc6`?Q^w-;hum~
zpnK9~cpXO^^axm`2>D0RZ%lq?at||I2b^J&WP*Yq&x5H=AcocaSS3%2{{fDn23!Q;
zzO}Ijqs0RdQoE}Cp8tI{;AGgv&EOLF{0i@pa3i9Z4yW8~nG=+A5y3!$uJE>Ib(-8&
z$WbIDvi8YYN6i}iTG?QwbL8l3wUQkxuT`>Z<z^)>S-Hh^p+uo`XuoXHPcXJ>g(eG#
zG%hgbNc$NIDST<uBqU`2le3pJ+hDFlReK4iH)*y-vu&E~u$6LC>fReFvFLWi!=bH;
z$KpG49_zy}N&z<c&BYtSb&4YdI!Y<urPS{EUay_Pj<3iqYgDe#5pGzyfmmHUM0u)n
z$hfZmq9)I1A!?5ZQJdSwn+@uW1aW8y2<$Fe1RLbx6`X)rZ_v@jP;tS=)Jc~M(s2Yr
z7LSa$$ORG5N!=4p%Cc|*mm$AF^zMsp9S^JVD#*0Q_M{4an_p_F@Do%epp7fp*Fjup
zkC*Uvm@wwL3?31s99~@N!k{B~TNpO1HLQSI_~&WvTbCGZ$>n$s;X=x`?=cqVK3*oq
ztg}*D!TdZ9hN2fqT1_6xV`?9hLn|Fh%^u`wmg&&0<*|cOxUW6I2@p*l`tqF4pOeG6
zeCYqjeB_eMrMucq4{~t`LQY#XoSLesZM9+l&V5yLP`A~m7_F(gv!!mUb*Bl9+v=7p
zG4%a$5GB4}d<GM}SLs;yOpuI&Y433w>fTeGym<aq^(gq*3TZ)wHa!V%(JKz#=hY9P
z^pE#V&P#Ixd%&H~<4AdY_EGyT!pP@V9WnRQ4|tl(p@csz<n(Z`+0^5HDEfmmj?vxt
zGr_|cNpl+qCW})ON8h8}F(+7?v%F{)8)>Nvin2EgHTGznMdP7`?jXW~T#qd{+sth<
zxd$?jDfe&R7a)isH0WHAo(1NWFrzd|JpSVG_=Cmcy?UG;9v`q^<+WaD<bsm->gH_Z
oXq<+ZvHqOBEIt8gD*kUsr3Q5yYmH9h<0jzIRE;(oyxYR;U#=b(5&!@I

delta 795
zcmYLHO>fgc5PfT>PHZPlnl?#Ii<BFdfK+hcgai^G;6jCqK(c_y#7ml_wNvj_0?Aen
z5!@@EkoXIT19$!d{0UB6;EtF{EAVQbXLdf{*k3z88e8V?hO__c=hG3+pA~xFCQ<Fv
zt@kH#4RH-K1KFyGZG>$|f$2J?8;BjmE*1vD4rCLugM~oYMO;VNL)<{P3E740!3#Y|
z7t;&>lO+|}VjtzitLRY~&UPSMkc5=#VPUorZbNS4{(}UF1@`>rmx1^wP8^=FP5{vs
z9m5t4(KSXU*?eh}+#yk)2{f<(3X}7T3f_D`Zs6{VQxq$#<ty->S5R0eswiqGY~(;*
z<?zki#ybkJ&WsuxF%5zf)?S<N3Wp2i0y7JlUbb+NP`rV{g(h+)a#~acxr_K;!v)No
z92MLop+-F#?5K-pbh+nSH1wm1Ae@ju^B0*MMq|0u)F{nl=~kUx-ylc8Tm#m8W*)Q%
zbdBKXSSme96Wt&qlVehcqcqK86;wEetB%s+L~GvKOuj9@S9io(`PjLBjVonQG8>-d
zsSep{lK{dN7VlbIHR{4PETb*f#Hvw#swF|6qxPe@RKqyZk&=4nJbxG^NjjLSG+EM8
z9z{M>eZ4O;*^m0K4vuACXOk!yAIMvZ#~V!NWB+3L()OyF$tCkLx1Wes`OUs0w#z^E
zjmyj{U>3nT!v=xo)lCjF8O2LhN@q*5<*sw@ich|};L{RKI)sMp+Us>;F&9xcEaK(}
HpZ=)7wOEct

diff --git a/classifiers/tf_graph_creation_helper.py b/classifiers/tf_graph_creation_helper.py
index 71240aa..1b18cc0 100644
--- a/classifiers/tf_graph_creation_helper.py
+++ b/classifiers/tf_graph_creation_helper.py
@@ -35,13 +35,15 @@ def placeholder_inputs_ans(total_vocab_size, ans_vocab_size, mode='gt'):
     image_regions = tf.placeholder(tf.float32, shape=[None,25,25,3])
     keep_prob = tf.placeholder(tf.float32)
     questions = tf.placeholder(tf.float32, shape=[None,total_vocab_size])
+    region_score = tf.placeholder(tf.float32, shape=[1,None])
+    
     if mode == 'gt':
         print 'Creating placeholder for ground truth'
         gt_answer = tf.placeholder(tf.float32, shape=[None, ans_vocab_size])
-        return (image_regions, questions, keep_prob, gt_answer)
+        return (image_regions, questions, keep_prob, gt_answer, region_score)
     if mode == 'no_gt':
         print 'No placeholder for ground truth'
-        return (image_regions, questions, keep_prob)
+        return (image_regions, questions, keep_prob, region_score)
         
 
 def obj_comp_graph(x, keep_prob):
@@ -93,15 +95,14 @@ def atr_comp_graph(x, keep_prob, obj_feat):
 def ans_comp_graph(image_regions, questions, keep_prob, \
                    obj_feat, atr_feat, vocab, inv_vocab, ans_vocab_size):
     with tf.name_scope('ans') as ans_graph:
-
         with tf.name_scope('word_embed') as word_embed:
-            initial = tf.random_uniform(shape=[len(vocab),100], minval=0, maxval=1)
+            initial = tf.truncated_normal(shape=[len(vocab),100], stddev=0.1)
+#            initial = tf.random_uniform(shape=[len(vocab),100], minval=0, maxval=1)
             word_vecs = tf.Variable(initial, name='word_vecs')
 
         with tf.name_scope('q_embed') as q_embed:
             q_feat = tf.matmul(questions, word_vecs)
-            num_words = tf.reduce_sum(questions, 1, keep_dims=True)
-            q_feat = tf.truediv(q_feat, num_words)
+#            q_feat = tf.truediv(q_feat, tf.cast(len(vocab),tf.float32))
 
         with tf.name_scope('conv1') as conv1:
             W_conv1 = weight_variable([5,5,3,4])
@@ -135,6 +136,15 @@ def ans_comp_graph(image_regions, questions, keep_prob, \
 
         return y_pred
 
+def aggregate_y_pred(y_pred, region_score, batch_size, num_proposals, ans_vocab_size):
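+    # Split y_pred (batch_size*num_proposals x ans_vocab_size) into one block per
+    # question and region_score (1 x batch_size*num_proposals) into the matching
+    # rows, then matmul gives each question's score-weighted answer prediction.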
+    y_pred_list = tf.split(0, batch_size, y_pred)
+    region_score_list = tf.split(1, batch_size, region_score)
+    y_avg_list = []
+    for i in xrange(batch_size):
+        y_avg_list.append(tf.matmul(region_score_list[i],y_pred_list[i]))
+    y_avg = tf.concat(0, y_avg_list)
+    return y_avg
+        
 def evaluation(y, y_pred):
     correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1), name='correct_prediction')
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
diff --git a/classifiers/tf_graph_creation_helper.pyc b/classifiers/tf_graph_creation_helper.pyc
index 0a2ca99d6b8999cdfb57a84d3560b6204947ad8c..cb966f7932fe3d93f724fcdb8c59eb25570ee614 100644
GIT binary patch
delta 2591
zcmbtWO>7%Q6n?Xg9ow;E$8o%|o&Thb+x(Hz7FzlfAXQshm3Bkx;uaa#yLBDXIJGyS
zt#)gqaHz^5LIV;Mq~1V?Ln{RdaVs|tTsd%p0~fdy#0faS`^GVif{>7~z2DBfdGGBv
z^M2N!@B8>b+Vg8T_+jPeb4A+twDWz2ui6i9r|;Fx4tkR)7bB~a%ox3<QT2Oac2X`*
zmXGGUXqD)qM!5v#bTZ>)c9EXzR%=PJx@lEVE~TbvvQn5ryN7bUWO-=55B9l!vifO$
z0OPf<NmwDW2FdDS?YbHzU}q-C)M-@{7d@04qTDd$M#u`2HAL13%PF@_x!g`>H<?K?
z@idVeC98cy#~LLwP03ZZ&ootglxnY1-9c7FsSd-V(mqK^YlpI8Iu>(gUW(1CpK?2u
zlQFf0ak9o~enLqmAz{WMYYIAZevydpbD6&NKg37gc0ZqOD__s!jK7ulTeEeWXkHLd
zgR0p=CQ(TPDc4613%%;(KO@seM~`h)Cvrqny_FmfIbL#F$l<Y%E_-PX9$J-$Z^-ns
zhsQds&?2w;(pp)_8~$bCmyi7Cq9Nv1wMym6V!l?aR4q9hINz0EA&C(Y6Mdpj3kfgZ
zJJ;_Ae$zxoeiAw&#^j^W1u-E{g!5v0{nPLv?O=vY%3gp6(86G7fE|FHfH8(L4yF7j
zf6BK7%f_)VA-6|&_fKIM0DzPc1v~-h03_tCs9rGk@Hhj0Mhwskm;~$vfQ0cB;Ay}<
zz<$6p06gC~05}LZ1UL+s0Xz$skiSF+d<d8^D`%3E@<>O_cZ{dCo$^6=SiaE_nI7UP
z$f>Y?#d~Z-iNnd6u(qzb{-y+RrL`IPuwy8-N}3x~3mmT?qQvn**xTf*!FV)DZb+?i
zKm|E%@^ma-4=Vec<hGOTBePBYLS%-i)Pf~9LN@#1sd}~>RUZ5_A0RtO4rfaQ<>Ew1
z$Mf3N^E$|GBd0^z$C?Hqa$+j|SPeG9?WAm-bMwskjon2Rm-onxD=y4J5#g?}!KF)a
z;c0_QqUpN9g}rkJVJ?ZyTy({SYn}Jf-85h>-HJ>7Z!$@>7AA*_Jx1J=xMZ-SxT2b(
z>}IC?96t3Ga?^@wC&~l*jjqBc-t!MRx|eW!x5(@07+C023#!gG9I1}u_G8QN-q`ZM
zCLSCF4jR`v$1_C^cQ~y%<*MZ_q>;X{%zn8!KA!V7`_+8EGeA0yW>A?%(4E*Bs|uL%
zyoa2ynmdCu*Ej?|iMdR#yc^%vZg7E7>F)dZd~sNQ7w<o-Dr9S!{+9}6PX>CKjrz^z
z8!f{)3h^;n=o+o7az*JIs*X|rHl_>KqJg$x><0XMd1G}NunX`cK-D^`%}_9m(c^+Y
z&QlwnS1QHACAlv#R6hj`5Hwx{oCdrE`2T%@z1V0=&8_Dz7pILE;QIuYa5UL){+be_
zF>JJtVLhhjd0Io;AuYx&bwo?^z1t&C>r-{`5Ax;X3ud)j3<xg28@LT{z3}_RL$0?e
zWC{N`-&p9Q5|P_V*Y}XE<eV$BuZ$8mAWq78kFw$H;unjbl6Y0{jkAZ<#I=KzwW*H$
z#DiLqMQ(tc0A~-vcB;%kWn@qW<O~Saao3g>$~LznqU+VdV#!j&!qSqpXsQ^id9kus
zEZ7-r&6ov@z#P*{1-p19Un}3V>@dAdw77gVzf`R(RceKWnlT8qHijE4w9NcMxn|qw
z|C`@Y4Jn-6$QM>hjVYLx6+tDGUut7x$jL&f#I^<7$~RoT#j+ay`H<-1uTSd~+c+xn
zmi|+lVtR#_j53;k1dp&*3Jc2xn_rOc$_L%mjM@ZKTP|Ap+Va&#j#o<R(2PFVDlZjw
zuvdH_Ur*i>lTuF&#*wcz<bT1gPVxDy?8;2Zlc{(5x5MZN;3VJzfCEbg|D7PCDt}Mi
z_MO8pD{rTVyUt>`r66YI!?Yg5dr)O!W2d`}<MPM!oxbN-91<ZB(L!GSw(z$t6bb13
L9S;QMT2JOLSf9|m

delta 2232
zcmbtVOK)366#nMgaU8!Nar{c0*l8L!O&VHCOQ9`M4Q&7+wKpg(CY6=uPJ-RoZfz$#
zbd8a+Dc!Wyuwa9_gC#;$1uK35`z{a&i4_})1VXSX=Zu{;EejUJ@s~5_HQ$^$bMnQ>
zuTP{MzxzE`gAbP1(Ekb0-v#=%zQ3Km)0GieVO%lLyf3T>N>NA`s>9d;Rt%J4D2>31
z!Wu@r6xX%{WE?vJrKDb_Ad|dg?=(uIkPcMGIKGsD%%D2X^3Crcqz`feGR@vB#7OAE
zioqJejv-bYC}mN~p)?8Uhs;7wk{+dfI^`6sIIILL-c6MDLk9XjazCsTk~<VXz3ZCR
zuA|yD4;j?1InLPkmU$giAUcvb_vv8AjnV;~WJVw1Aml+*XSHUIHN;&MErAOc>j3>-
z%8%W38lMjY2C1a{EpTb?sc#00UfY1>)U{P=)|F7_-JWAE*bdkOu${17Sa)KHy<FPs
zD_Da#xA@3m-GL<ww1r!J=sKf*a2Ld&$__27>qEupFzo}ePeepU_{0EV=<Xjwe;Z<2
z{px!~%&4ROx5ccw<u8l*yTAEQvtd4z@a8GZJisucvazV^Ad9o=O6cgmLo5z4_!vSA
z^9*5z5r${fx1sn-Uds_yj4{kHJjXE2aE#$N!}AO$7+zqQVR(_@B*Q6&(+n>$EHKQf
zW8n$c85RrbwsTH>9FB^!>a+0i!;_?BPP)Q>89*DT3cduol$$>r9rv!r(3-PqAd>4+
zNi8?soTKB>W~#df)(|WYH%G^dB9-{U#eOcYjx3<#)6R5H0yZfPbo^RF_eWd0VUfci
zw{0h&^={aH*nz(1<D6ac_Uoh{pc6!!HV)|DAgmBJoNVfZ&?Zi_)JN%rbw)3$v<D>x
z>=5U*!-zBSuqGZsI{-VP<D<I)LD*4nLy;Sqh+TL1$pc0;mrv1&X)eS<6QTFr=Q5(X
z(6YxRzMHzog|hQwg&p6^C84<_G?(OV0C7pe?(QX%(zX!nRR7s;^sF>bh-&&traIlt
zZFEL8(-GK%VE-gPT)w)of9ZL&h)!m&o(2pB=t0mK*9K~UM~P@oQNRQrnU%dGXZP@+
zOX%GObwV08`5XESw4`U8;boj#r}v;~)IDC)%}rqE6e;X1;(X0H9ZCZ?!^y~1a44<E
zxp{bP=djf49sTYn)R)6kwYH0zzE-JkuWnXILT|nL9<K$hO?nD^GT_}t(<*PvwHCD|
zaIGwBm!+jGSERgJwkoxjexa2CGFKc7P6G3(nUL$v1h#OVwws_>Zpw{Hqi&vM^*Oa1
zo9;$g<SZsn4r)iBxmvdxwenWIvfgOc+HBmEb(8lqpJgWxJ4tJ6O_p0*wKi}6W^+qw
z!fg_)%JzSaAkKA|V9Gqg;!%S9BuhC4Ej^w}i|RpazWWAS_!#DSh6@bK1iCi$ty-BY
z*Q?F{n_irghv#m&%Ij-~%~#XB-)n5+dKSyqw0h5wyFU_5Um{a$fdEFth=>`HCX9%L
zNEk}Q=hcsiU%UmfnrjS}9*z%_L2*L`Q_YO|7HNC+ZfxWo?2=*75mk2f=wfd+TCH2^
zr_>!Wr#?z&hZo7I#l5rIZqBbY>f1;1ht$3FC#flpewpEQhRY1xe&(inXY{&jg~dzi
z&(X=)MHZi`eL)=>i%0nXf<E&5<*Rh*N6bZ49J?{PKx!ZT(+V2C0s1)U<MsK6;`EtQ
I56ANV039)lasU7T

-- 
GitLab