From 8922eafeee83f2b435df1fe599db6dd1caa9f11d Mon Sep 17 00:00:00 2001
From: tgupta6 <tgupta6@illinois.edu>
Date: Fri, 19 Aug 2016 13:26:38 -0500
Subject: [PATCH] Local changes to crunchy constants and VQA eval code

---
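Notes (kept above the diff so `git am` ignores them):

constants_crunchy.py stops truncating the VQA val split to 10000 questions.
It now records the full split alongside a 10000-question subset, and answer
evaluation runs on the remaining 'val_rest' questions. For reference, the
resulting split arithmetic (values taken from the constants below):

    # Reference sketch only; mirrors the new constants in constants_crunchy.py.
    num_val_questions = 121512          # full VQA val split
    num_val_subset_questions = 10000    # subset excluded from 'val_rest' eval
    num_val_rest_questions = num_val_questions - num_val_subset_questions  # 111512
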
 constants_crunchy.py               |  9 +++++---
 visual_util/visualize_relevance.py | 33 +++++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 10 deletions(-)
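
visual_util/visualize_relevance.py swaps the per-box Gaussian relevance maps
for hard box masks summed over the detected boxes, and overlays relevance by
scaling the image directly instead of blending in a jet colormap. write_html
now samples 5000 question ids with a fixed seed so the qualitative results
page is reproducible, and adds the question id as a column. A minimal sketch
of the new map composition (compose_relevance is an illustrative standalone
version of create_relevance_map; the np.clip guard against overlapping boxes
is an assumption here, not part of the committed code):

    import numpy as np

    def compose_relevance(im, box_score_pairs, make_boxmap):
        # Sum hard box masks weighted by their relevance scores.
        rel_map = np.zeros(im.shape[0:2])
        for box, score in box_score_pairs:
            rel_map += score * make_boxmap(box, im.shape[0:2])
        # Assumption: overlapping boxes can push the sum above 1, so clamp
        # before using the map as a multiplicative mask.
        rel_map = np.clip(rel_map, 0.0, 1.0)
        # Dim low-relevance regions; rel_map[:, :, None] broadcasts over RGB,
        # equivalent to the np.tile(...) call in the patch.
        im_rel_map = np.uint8(0.2*im + 0.8*rel_map[:, :, None]*im)
        return rel_map, im_rel_map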

diff --git a/constants_crunchy.py b/constants_crunchy.py
index e0b1cac..ac7d315 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -8,7 +8,7 @@ def mkdir_if_not_exists(dir_name):
 experiment_name = 'QA_explicit_dot_joint_training_pretrained_same_lr'
 #experiment_name = 'object_attribute_classifier_large_images'
 # Global output directory (all subexperiments will be saved here)
-global_output_dir = '/data/tanmay/GenVQA_Exp_Results'
+global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
 
 global_experiment_dir = os.path.join(
     global_output_dir,
@@ -154,7 +154,10 @@ vqa_answer_vocab_json = os.path.join(
 
 # VQA dataset params
 num_train_questions = 248349
-num_val_questions = 10000 #121512
+num_val_subset_questions = 10000
+num_val_questions = 121512
+num_val_rest_questions = num_val_questions - num_val_subset_questions
+
 num_test_questions = 0
 
 # Answer classifier training params
@@ -189,7 +192,7 @@ answer_fine_tune_from_iter = 19500
 answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
 
 # Answer eval params
-answer_eval_on = 'val'
+answer_eval_on = 'val_rest'
 answer_model_to_eval = answer_model + '-39000'
 
 answer_eval_data_json = os.path.join(
diff --git a/visual_util/visualize_relevance.py b/visual_util/visualize_relevance.py
index 521123b..72b07c7 100644
--- a/visual_util/visualize_relevance.py
+++ b/visual_util/visualize_relevance.py
@@ -2,6 +2,7 @@ import ujson
 import os
 import csv
 import numpy as np
+import random
 from matplotlib import cm
 
 import image_io
@@ -103,10 +104,13 @@ class RelevanceVisualizer():
         box_score_pairs = self.get_box_score_pairs(bboxes, rel_scores)
         rel_map = np.zeros(im.shape[0:2])
         for box, score in box_score_pairs:
-            gauss_map = self.make_gaussian(box, im.shape[0:2])
-            rel_map = np.maximum(rel_map, score*gauss_map)
+            box_map = self.make_boxmap(box, im.shape[0:2])
+            rel_map = rel_map + score*box_map
+            # gauss_map = self.make_gaussian(box, im.shape[0:2])
+            # rel_map = np.maximum(rel_map, score*gauss_map)
         rel_map_ =cm.jet(np.uint8(rel_map*255))[:,:,0:3]*255
-        im_rel_map = np.uint8(0.3*im+0.7*rel_map_)
+        # im_rel_map = np.uint8(0.5*im+0.5*rel_map_)
+        im_rel_map = np.uint8(0.2*im + 0.8*np.tile(rel_map[:,:,None], [1,1,3])*im)
         return rel_map, im_rel_map, ans, ans_score
 
     def make_gaussian(self, box, im_size):
@@ -121,6 +125,16 @@ class RelevanceVisualizer():
         g = np.exp(-((xx-cx)**2/(2*sigma_x**2)) - ((yy-cy)**2/(2*sigma_y**2)))
         return g
 
+    def make_boxmap(self, box, im_size):
+        im_h, im_w = im_size
+        x = int(box['x'])
+        y = int(box['y'])
+        w = int(box['w'])
+        h = int(box['h'])
+        box_map = np.zeros(im_size)
+        box_map[y-1:y+h-1, x-1:x+w-1] = 1.0  # box coords treated as 1-indexed
+        return box_map
+
     def write_html(self):
         col_dict = {
             0: 'Question',
@@ -128,9 +142,13 @@ class RelevanceVisualizer():
             2: 'Pos. Relevance',
             3: 'Pred. Answer',
             4: 'Pred. Relevance',
+            5: 'Question Id'
         }
         self.html_writer.add_element(col_dict)
-        for qid in rel_vis.eval_data.keys():
+        random.seed(0)
+        qids = sorted(rel_vis.eval_data.keys())
+        random.shuffle(qids)
+        for qid in qids[0:5000]:
             question = rel_vis.anno_data[qid]['question']
 
             pred_rel, pred_im_rel, pred_ans, pred_score = rel_vis.create_relevance_map(
@@ -159,11 +177,12 @@ class RelevanceVisualizer():
                 2 : self.html_writer.image_tag(pos_im_name, im_h, im_w),
                 3 : pred_ans + ': ' + str(pred_score),               
                 4 : self.html_writer.image_tag(pred_im_name, im_h, im_w),
+                5 : qid,
             }
             
             self.html_writer.add_element(col_dict)
 
-        self.html_writer.close()
+        self.html_writer.close_file()
 
 
 if __name__=='__main__':
@@ -185,9 +204,9 @@ if __name__=='__main__':
 
     eval_data_json = os.path.join(
         exp_dir,
-        'answer_classifiers/eval_val_data.json')
+        'answer_classifiers/eval_val_rest_data.json')
 
-    output_dir = os.path.join(exp_dir, 'qual_results2')
+    output_dir = os.path.join(exp_dir, 'qual_results_val_rest_conf')
     if not os.path.exists(output_dir):
         os.mkdir(output_dir)
 
-- 
GitLab