Commit e6318fb6 authored by tgupta6

inner_product_selection functional but not a good idea

parent 97d8c1ed
@@ -10,6 +10,8 @@ class AnswerInference():
            answer_region_scores,
            question_vert_concat,
            answers_vert_concat,
            noun_embed,
            adjective_embed,
            num_answers,
            space_dim,
            keep_prob):
@@ -20,16 +22,30 @@ class AnswerInference():
        self.answer_region_scores = answer_region_scores
        self.question_vert_concat = question_vert_concat
        self.answers_vert_concat = answers_vert_concat
        self.noun_embed = noun_embed
        self.adjective_embed = adjective_embed
        self.num_answers = num_answers
        self.keep_prob = keep_prob
        self.ordered_noun_keys = ['positive_nouns']
        self.ordered_adjective_keys = ['positive_adjectives']
        for i in xrange(self.num_answers-1):
            self.ordered_noun_keys.append('negative_nouns_' + str(i))
            self.ordered_adjective_keys.append('negative_adjectives_' + str(i))
        with tf.variable_scope('answer_graph'):
            self.obj_atr_qa_elementwise_prod = [None]*self.batch_size
            self.selected_noun_adjective = [None]*self.batch_size
            for j in xrange(self.batch_size):
                self.obj_atr_qa_elementwise_prod[j] = self.elementwise_product(
                    self.object_feat[j],
                    self.attribute_feat[j],
                    self.question_vert_concat[j],
                    self.answers_vert_concat[j])
                noun_embed = []
                adjective_embed = []
                for key1, key2 in zip(self.ordered_noun_keys, self.ordered_adjective_keys):
                    noun_embed.append(self.noun_embed[key1][j])
                    adjective_embed.append(self.adjective_embed[key2][j])
                self.selected_noun_adjective[j] = self.inner_product_selection(
                    self.object_feat[j],
                    self.attribute_feat[j],
                    noun_embed,
                    adjective_embed)
            self.per_region_answer_scores = [None]*self.batch_size
            for j in xrange(self.batch_size):
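The ordered key lists above put the positive answer's words first, followed by num_answers - 1 negatives, and the inner loop gathers one embedding matrix per answer for sample j. A standalone NumPy sketch of that gathering step, with an assumed dictionary layout and made-up sizes (not the repo's actual shapes):

import numpy as np

num_answers = 4   # 1 positive + 3 negatives (assumed for illustration)
batch_size = 2
embed_dim = 8

# same key convention as the commit: positive first, then the negatives
ordered_noun_keys = ['positive_nouns'] + [
    'negative_nouns_' + str(i) for i in range(num_answers - 1)]
ordered_adjective_keys = ['positive_adjectives'] + [
    'negative_adjectives_' + str(i) for i in range(num_answers - 1)]

rng = np.random.default_rng(0)
# assumed layout: key -> per-sample list of [num_words, embed_dim] matrices
noun_embed = {k: [rng.normal(size=(2, embed_dim)) for _ in range(batch_size)]
              for k in ordered_noun_keys}
adjective_embed = {k: [rng.normal(size=(2, embed_dim)) for _ in range(batch_size)]
                   for k in ordered_adjective_keys}

j = 0  # one sample of the batch, as in the loop over self.batch_size
nouns_j = [noun_embed[k][j] for k in ordered_noun_keys]
adjectives_j = [adjective_embed[k][j] for k in ordered_adjective_keys]
print(len(nouns_j), nouns_j[0].shape)   # 4 (2, 8)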
@@ -60,7 +76,7 @@ class AnswerInference():
                obj_atr_qa_feat = tf.concat(
                    2,
-                   [self.obj_atr_qa_elementwise_prod[j], q_feat, a_feat])
+                   [self.selected_noun_adjective[j], q_feat, a_feat])
                obj_atr_qa_feat = tf.expand_dims(
                    obj_atr_qa_feat,
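The one-line change in this hunk swaps the elementwise-product features for the selected noun/adjective features when building the per-region, per-answer input; in this old TF API, tf.concat(2, ...) concatenates along the third axis. A standalone NumPy sketch of the shape arithmetic, with made-up sizes (the repo's actual feature dimensions are not shown in this view):

import numpy as np

num_regions, num_answers, embed_dim, q_dim, a_dim = 4, 3, 8, 5, 5
rng = np.random.default_rng(0)

selected = rng.normal(size=(num_regions, num_answers, 2 * embed_dim))  # selected noun+adjective feats
q_feat = rng.normal(size=(num_regions, num_answers, q_dim))            # question feature per region/answer
a_feat = rng.normal(size=(num_regions, num_answers, a_dim))            # candidate-answer features

# axis=2 plays the role of tf.concat(2, ...) in the commit's TF 0.x code
obj_atr_qa_feat = np.concatenate([selected, q_feat, a_feat], axis=2)
print(obj_atr_qa_feat.shape)   # (4, 3, 26)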
@@ -104,6 +120,41 @@ class AnswerInference():
                0,
                keep_dims=True)

    def inner_product_selection(self, obj_feat, atr_feat, noun_embed, adjective_embed):
        feats = []
        for k in xrange(18):
            scores = tf.matmul(
                obj_feat,
                tf.transpose(noun_embed[k]))
            scores1 = tf.nn.softmax(scores)
            feat1 = tf.matmul(
                scores1,
                noun_embed[k])
            scores = tf.matmul(
                atr_feat,
                tf.transpose(adjective_embed[k]))
            scores2 = tf.nn.softmax(scores)
            feat2 = tf.matmul(
                scores2,
                adjective_embed[k])
            feat = tf.concat(
                1,
                [feat1, feat2])
            # feat = tf.tile(
            #     tf.expand_dims(feat,1),
            #     [1,self.num_answers,1])
            feats.append(feat)
        feats = tf.pack(feats)
        feats = tf.transpose(
            feats,
            [1,0,2])
        return feats
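The new inner_product_selection method scores the object and attribute features (presumably one row per region) against every noun and adjective embedding of a candidate answer by inner product, softmax-normalizes the scores, and returns the score-weighted average embeddings, stacked as [regions, answers, 2 * embed_dim] after the transpose. Note that the loop hard-codes 18 answer candidates rather than using self.num_answers. A standalone NumPy sketch of the same computation (shapes and sizes are assumptions for illustration; the sketch iterates over whatever lists are passed instead of a fixed 18):

import numpy as np

def softmax(x, axis=-1):
    # numerically stable softmax along the given axis
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def inner_product_selection(obj_feat, atr_feat, noun_embed, adjective_embed):
    # obj_feat, atr_feat: [num_regions, embed_dim] (assumed shapes)
    # noun_embed[k], adjective_embed[k]: [num_words_k, embed_dim] for answer k
    feats = []
    for nouns, adjectives in zip(noun_embed, adjective_embed):
        # inner-product scores of every region against every word of this answer,
        # then a softmax-weighted average of the word embeddings
        noun_attn = softmax(obj_feat @ nouns.T)        # [regions, words]
        adj_attn = softmax(atr_feat @ adjectives.T)    # [regions, words]
        feat = np.concatenate([noun_attn @ nouns, adj_attn @ adjectives], axis=1)
        feats.append(feat)                             # [regions, 2*embed_dim]
    # tf.pack followed by transpose([1,0,2]) in the commit == stacking along axis 1 here
    return np.stack(feats, axis=1)                     # [regions, answers, 2*embed_dim]

# toy usage with assumed sizes
num_regions, embed_dim, num_answers = 4, 8, 3
rng = np.random.default_rng(0)
obj = rng.normal(size=(num_regions, embed_dim))
atr = rng.normal(size=(num_regions, embed_dim))
nouns = [rng.normal(size=(2, embed_dim)) for _ in range(num_answers)]
adjs = [rng.normal(size=(2, embed_dim)) for _ in range(num_answers)]
print(inner_product_selection(obj, atr, nouns, adjs).shape)   # (4, 3, 16)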

    def elementwise_product(self, obj_feat, atr_feat, ques_feat, ans_feat):
        tiled_ques = tf.tile(tf.reshape(ques_feat,[1, -1]),[self.num_answers,1])
        qa_feat = tf.concat(
......
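The elementwise_product fragment above (truncated at the '......') starts by tiling the single question vector across the num_answers candidates; presumably the subsequent tf.concat pairs it with the per-answer features, but that part is cut off here. A standalone NumPy sketch of just the visible tiling step, with made-up sizes:

import numpy as np

num_answers, q_dim, a_dim = 3, 6, 6
rng = np.random.default_rng(0)
ques_feat = rng.normal(size=(q_dim,))              # one question vector
ans_feat = rng.normal(size=(num_answers, a_dim))   # one row per candidate answer (assumed shape)

# mirrors tf.tile(tf.reshape(ques_feat, [1, -1]), [num_answers, 1])
tiled_ques = np.tile(ques_feat.reshape(1, -1), (num_answers, 1))
# assumed pairing with the per-answer features; the original concat's operands
# are not visible in this view
qa_feat = np.concatenate([tiled_ques, ans_feat], axis=1)
print(qa_feat.shape)   # (3, 12)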
@@ -103,6 +103,8 @@ class graph_creator():
            self.relevance_inference.answer_region_prob,
            self.question_embed_concat,
            self.answers_embed_concat,
            self.noun_embed,
            self.adjective_embed,
            self.num_neg_answers + 1,
            self.space_dim,
            self.plh['keep_prob'])
......
@@ -5,7 +5,7 @@ def mkdir_if_not_exists(dir_name):
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)

-experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform_large_obj_atr_wt' #'QA_joint_pretrain_genome_split'
+experiment_name = 'QA_classifier_joint_pretrain_wordvec_xform_select' #'QA_joint_pretrain_genome_split'

# Global output directory (all subexperiments will be saved here)
global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
......