diff --git a/answer_classifier_cached_features/inference.py b/answer_classifier_cached_features/inference.py
index 7f5d0c815872900e4aa5a6bd293b6f04124ccf4c..8feae9671a1981de8cdf46f48c4159b226bbd441 100644
--- a/answer_classifier_cached_features/inference.py
+++ b/answer_classifier_cached_features/inference.py
@@ -57,6 +57,19 @@ class AnswerInference():
                     noun_embed,
                     adjective_embed)
             
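+                # Pool the per-region noun/adjective selections into a single
+                # num_answers x 2 tensor: weight each region's selection by its
+                # answer-region relevance score and average over regions
+                # (assuming answer_region_scores[j] is 18 x 100).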
+                # 100 x 18
+                answer_region_scores_ = tf.transpose(self.answer_region_scores[j])
+                
+                # 100 x 18 x 1
+                answer_region_scores_ = tf.expand_dims(
+                    answer_region_scores_, 
+                    2)
+
+                # 18 x 2
+                self.selected_noun_adjective[j] = tf.reduce_mean(
+                    self.selected_noun_adjective[j]*answer_region_scores_,
+                    0)
+                
 #            self.per_region_answer_scores = [None]*self.batch_size
             obj_atr_qa_feat = [None]*self.batch_size
             for j in xrange(self.batch_size):
@@ -65,55 +78,82 @@ class AnswerInference():
                 else:
                     reuse_vars = True
 
+                # 1 x 1200
                 q_feat = tf.reshape(
                     self.question_vert_concat[j],
                     [1, -1])
-        
-                q_feat = tf.expand_dims(q_feat,0)
-        
+
+                # 18 x 1200
                 q_feat = tf.tile(
                     q_feat,
-                    [self.num_regions, self.num_answers, 1])
+                    [self.num_answers, 1])
 
-                a_feat = tf.expand_dims(
-                    self.answers_vert_concat[j],
-                    0)
 
-                a_feat = tf.tile(
-                    a_feat,
-                    [self.num_regions, 1, 1])
-
-                # 100 x 1000
-                obj_det_feat = tf.expand_dims(
-                    self.obj_detector_scores[j],
-                    1)
-
-                # 100 x 18 x 1000
-                obj_det_feat = tf.tile(
-                    obj_det_feat,
-                    [1, self.num_answers, 1])
-
-                # 100 x 1000
-                atr_det_feat = tf.expand_dims(
-                    self.atr_detector_scores[j],
-                    1)
-
-                # 100 x 18 x 1000
-                atr_det_feat = tf.tile(
-                    atr_det_feat,
-                    [1, self.num_answers, 1])
-
-                # 1 x 18 x 2
-                yes_no_feat_ = tf.expand_dims(
-                    self.yes_no_feat[j],
-                    0)
+
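+                # Answer embeddings are used directly; no tiling over regions is
+                # needed now that features are aggregated per answer.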
+                # 18 x 300
+                a_feat = self.answers_vert_concat[j]
 
-                yes_no_feat_ = tf.tile(
-                    yes_no_feat_,
-                    [self.num_regions, 1, 1])
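+                # Pool detector scores over regions using the answer-region scores:
+                # (18 x 100) matmul (100 x 1000) -> 18 x 1000 per-answer features.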
+                # 18 x 1000
+                obj_det_feat = tf.matmul(
+                    self.answer_region_scores[j],
+                    self.obj_detector_scores[j])
 
+                # 18 x 1000
+                atr_det_feat = tf.matmul(
+                    self.answer_region_scores[j],
+                    self.atr_detector_scores[j])
+
+                # 18 x 2 
+                yes_no_feat_ = self.yes_no_feat[j]
+
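+                # Debug: verify per-answer feature shapes before concatenation.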
+                print '-'*80
+                print q_feat.get_shape()
+                print a_feat.get_shape()
+                print self.selected_noun_adjective[j].get_shape()
+                print yes_no_feat_.get_shape()
+                print obj_det_feat.get_shape()
+                print atr_det_feat.get_shape()
+                # 18 x (2 + 2 + 1000 + 1000 + 1200 + 300) = 18 x 3504
                 obj_atr_qa_feat[j] = tf.concat(
-                    2,
+                    1,
                     [self.selected_noun_adjective[j], yes_no_feat_, obj_det_feat, atr_det_feat, q_feat, a_feat])
 
 
@@ -121,50 +161,42 @@ class AnswerInference():
                 #     obj_atr_qa_feat[j],
                 #     0)
 
+            # batch_size x 18 x 3504
             obj_atr_qa_feat = tf.pack(obj_atr_qa_feat)
+
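+            # Insert a singleton dimension so the conv2d layers below act as fully
+            # connected layers applied independently to each of the 18 answers
+            # (a 1x1 kernel is assumed here, per the shape comments).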
+            # batch_size x 1 x 18 x 3504
+            obj_atr_qa_feat = tf.expand_dims(obj_atr_qa_feat, 1)
             print obj_atr_qa_feat.get_shape()
 
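+            # Two conv2d layers (with batch norm + ReLU in between) now score each
+            # answer directly, replacing the per-region scores and the
+            # relevance-weighted sum removed below.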
-            self.per_region_answer_scores = layers.conv2d(
+            # batch_size x 1 x 18 x 2500
+            self.answer_score = layers.conv2d(
                 obj_atr_qa_feat,
                 1,
                 2500,
                 'per_region_ans_score_conv_1',
                 func = None)
             
-            self.per_region_answer_scores = tf.nn.relu(
+            self.answer_score = tf.nn.relu(
                 layers.batch_norm(
-                    self.per_region_answer_scores,
+                    self.answer_score,
                     tf.constant(self.is_training)))                
 
-            self.per_region_answer_scores = layers.conv2d(
-                self.per_region_answer_scores,
+            # batch_size x 1 x 18 x 1
+            self.answer_score = layers.conv2d(
+                self.answer_score,
                 1,
                 1,
                 'per_region_ans_score_conv_2',
                 func = None)
                 
-            print self.per_region_answer_scores.get_shape()
-            self.per_region_answer_scores = tf.squeeze(
-                self.per_region_answer_scores,
+            print self.answer_score.get_shape()
+            # batch_size x 1 x 18
+            self.answer_score = tf.squeeze(
+                self.answer_score,
                 [3])
             
-            self.per_region_answer_scores = tf.unpack(self.per_region_answer_scores)
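+            # Unpack along the batch dimension into a list of batch_size tensors,
+            # each of shape 1 x 18 (one score per candidate answer).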
+            self.answer_score = tf.unpack(self.answer_score)
 
-            self.per_region_answer_prob = [None]*self.batch_size
-            self.answer_score = [None]*self.batch_size
-            for j in xrange(self.batch_size):
-                self.per_region_answer_prob[j] = tf.nn.softmax(
-                    self.per_region_answer_scores[j],
-                    'per_region_answer_prob_softmax')
-                
-                answer_score_tmp = tf.mul(
-                    self.per_region_answer_scores[j],
-                    tf.transpose(self.answer_region_scores[j]))
-            
-                self.answer_score[j] = tf.reduce_sum(
-                    answer_score_tmp,
-                    0,
-                    keep_dims=True)
         
     def inner_product_selection(self, obj_feat, atr_feat, noun_embed, adjective_embed):
         feats = []
diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index 32aa73ae2491d2f94112ccaf4c5dd8077ee2d13c..37dd4f95cb5102bb06e4181c376762ef833d2430 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -880,9 +880,9 @@ class log_mgr():
                 eval_vars_dict['relevance_prob'].shape)
             print np.max(eval_vars_dict['relevance_prob'])
 
-            print 'Per region answer prob shape: {}'.format(
-                eval_vars_dict['per_region_answer_prob'].shape)
-            print np.max(eval_vars_dict['per_region_answer_prob'])
 
             print 'Learning Rate: {}'.format(eval_vars_dict['lr'])
 
@@ -984,7 +984,7 @@ if __name__=='__main__':
         'optimizer_op': optimizer.train_op,
         'word_vectors': graph.word_vec_mgr.word_vectors, 
         'relevance_prob': graph.relevance_inference.answer_region_prob[0],
-        'per_region_answer_prob': graph.answer_inference.per_region_answer_prob[0],
         'object_scores': graph.obj_atr_inference.object_scores,
         'attribute_scores': graph.obj_atr_inference.attribute_scores,
         'answer_scores': graph.answer_inference.answer_score[0],