From e6318fb69606717eb79bdca38c0e4d6eda62a5f3 Mon Sep 17 00:00:00 2001
From: tgupta6 <tgupta6@illinois.edu>
Date: Wed, 21 Sep 2016 15:36:52 -0500
Subject: [PATCH] inner_product_selection functional but not a good idea

---
 .../inference.py                            | 65 +++++++++++++++++--
 answer_classifier_cached_features/train.py  |  2 +
 constants_crunchy.py                        |  2 +-
 3 files changed, 61 insertions(+), 8 deletions(-)

diff --git a/answer_classifier_cached_features/inference.py b/answer_classifier_cached_features/inference.py
index d599de1..c826717 100644
--- a/answer_classifier_cached_features/inference.py
+++ b/answer_classifier_cached_features/inference.py
@@ -10,6 +10,8 @@ class AnswerInference():
             answer_region_scores,
             question_vert_concat,
             answers_vert_concat,
+            noun_embed,
+            adjective_embed,
             num_answers,
             space_dim,
             keep_prob):
@@ -20,16 +22,30 @@ class AnswerInference():
         self.answer_region_scores = answer_region_scores
         self.question_vert_concat = question_vert_concat
         self.answers_vert_concat = answers_vert_concat
+        self.noun_embed = noun_embed
+        self.adjective_embed = adjective_embed
         self.num_answers = num_answers
         self.keep_prob = keep_prob
+        self.ordered_noun_keys = ['positive_nouns']
+        self.ordered_adjective_keys = ['positive_adjectives']
+        for i in xrange(self.num_answers-1):
+            self.ordered_noun_keys.append('negative_nouns_' + str(i))
+            self.ordered_adjective_keys.append('negative_adjectives_' + str(i))
+
         with tf.variable_scope('answer_graph'):
-            self.obj_atr_qa_elementwise_prod = [None]*self.batch_size
+            self.selected_noun_adjective = [None]*self.batch_size
             for j in xrange(self.batch_size):
-                self.obj_atr_qa_elementwise_prod[j] = self.elementwise_product(
-                    self.object_feat[j],
-                    self.attribute_feat[j],
-                    self.question_vert_concat[j],
-                    self.answers_vert_concat[j])
+                noun_embed = []
+                adjective_embed = []
+                for key1, key2 in zip(self.ordered_noun_keys,self.ordered_adjective_keys):
+                    noun_embed.append(self.noun_embed[key1][j])
+                    adjective_embed.append(self.adjective_embed[key2][j])
+
+                self.selected_noun_adjective[j] = self.inner_product_selection(
+                    self.object_feat[j],
+                    self.attribute_feat[j],
+                    noun_embed,
+                    adjective_embed)
 
             self.per_region_answer_scores = [None]*self.batch_size
             for j in xrange(self.batch_size):
@@ -60,7 +76,7 @@ class AnswerInference():
 
                 obj_atr_qa_feat = tf.concat(
                     2,
-                    [self.obj_atr_qa_elementwise_prod[j], q_feat, a_feat])
+                    [self.selected_noun_adjective[j], q_feat, a_feat])
 
                 obj_atr_qa_feat = tf.expand_dims(
                     obj_atr_qa_feat,
@@ -104,6 +120,41 @@ class AnswerInference():
             0,
             keep_dims=True)
 
+    def inner_product_selection(self, obj_feat, atr_feat, noun_embed, adjective_embed):
+        feats = []
+        for k in xrange(18):
+            scores = tf.matmul(
+                obj_feat,
+                tf.transpose(noun_embed[k]))
+            scores1 = tf.nn.softmax(scores)
+            feat1 = tf.matmul(
+                scores1,
+                noun_embed[k])
+
+            scores = tf.matmul(
+                atr_feat,
+                tf.transpose(adjective_embed[k]))
+            scores2 = tf.nn.softmax(scores)
+            feat2 = tf.matmul(
+                scores2,
+                adjective_embed[k])
+
+            feat = tf.concat(
+                1,
+                [feat1, feat2])
+
+            # feat = tf.tile(
+            #     tf.expand_dims(feat,1),
+            #     [1,self.num_answers,1])
+
+            feats.append(feat)
+
+        feats = tf.pack(feats)
+        feats = tf.transpose(
+            feats,
+            [1,0,2])
+        return feats
+
     def elementwise_product(self, obj_feat, atr_feat, ques_feat, ans_feat):
         tiled_ques = tf.tile(tf.reshape(ques_feat,[1, -1]),[self.num_answers,1])
         qa_feat = tf.concat(
diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index e6de431..968ee77 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -103,6 +103,8 @@ class graph_creator():
             self.relevance_inference.answer_region_prob,
             self.question_embed_concat,
             self.answers_embed_concat,
+            self.noun_embed,
+            self.adjective_embed,
             self.num_neg_answers + 1,
             self.space_dim,
             self.plh['keep_prob'])
diff --git a/constants_crunchy.py b/constants_crunchy.py
index 9ae185b..ec7de0c 100644
--- a/constants_crunchy.py
+++ b/constants_crunchy.py
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
     if not os.path.exists(dir_name):
         os.mkdir(dir_name)
 
-experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform_large_obj_atr_wt' #'QA_joint_pretrain_genome_split'
+experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform_select' #'QA_joint_pretrain_genome_split'
 
 # Global output directory (all subexperiments will be saved here)
 global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
-- 
GitLab
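
For reference, a minimal NumPy sketch of the selection that inner_product_selection performs for a single image: each answer candidate's noun and adjective embeddings are softly selected via softmax-normalized inner products with the per-region object and attribute features, and the two selected vectors are concatenated per region. The array shapes, the softmax helper, and the _np function name below are illustrative assumptions, not code from this repository.

    # Illustrative sketch only, not part of the patch.
    import numpy as np

    def softmax(x, axis=-1):
        # numerically stable row-wise softmax
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    def inner_product_selection_np(obj_feat, atr_feat, noun_embed, adjective_embed):
        # obj_feat, atr_feat: (num_regions, d) region object/attribute features
        # noun_embed[k], adjective_embed[k]: (num_words_k, d) embeddings for answer k
        feats = []
        for nouns, adjectives in zip(noun_embed, adjective_embed):
            noun_attn = softmax(obj_feat.dot(nouns.T))      # (num_regions, num_words_k)
            feat1 = noun_attn.dot(nouns)                    # (num_regions, d)
            adj_attn = softmax(atr_feat.dot(adjectives.T))
            feat2 = adj_attn.dot(adjectives)
            feats.append(np.concatenate([feat1, feat2], axis=1))
        # (num_regions, num_answers, 2*d), matching tf.pack + tf.transpose([1,0,2]) in the patch
        return np.stack(feats, axis=1)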