diff --git a/answer_classifier_cached_features/inference.py b/answer_classifier_cached_features/inference.py
index c8267174a6a3f06c2a3a032678612cd705d9b240..8ddbe60a48f596bdb038f481eccfa3b273b9f1c2 100644
--- a/answer_classifier_cached_features/inference.py
+++ b/answer_classifier_cached_features/inference.py
@@ -1,5 +1,6 @@
 from tftools import layers
 import tensorflow as tf
+import constants
 import pdb
 
 class AnswerInference():
@@ -14,11 +15,13 @@ class AnswerInference():
             adjective_embed,
             num_answers,
             space_dim,
-            keep_prob):
+            keep_prob,
+            is_training=True):
         
         self.batch_size = len(object_feat)
         self.object_feat = object_feat
         self.attribute_feat = attribute_feat
+        self.num_regions = constants.num_region_proposals
         self.answer_region_scores = answer_region_scores
         self.question_vert_concat = question_vert_concat
         self.answers_vert_concat = answers_vert_concat
@@ -26,6 +29,7 @@ class AnswerInference():
         self.adjective_embed = adjective_embed
         self.num_answers = num_answers
         self.keep_prob = keep_prob
+        self.is_training = is_training
         self.ordered_noun_keys = ['positive_nouns']
         self.ordered_adjective_keys = ['positive_adjectives']
         for i in xrange(self.num_answers-1):
@@ -53,8 +57,6 @@ class AnswerInference():
                     reuse_vars = False
                 else:
                     reuse_vars = True
-                
-                num_regions = 100
 
                 q_feat = tf.reshape(
                     self.question_vert_concat[j],
@@ -64,7 +66,7 @@ class AnswerInference():
         
                 q_feat = tf.tile(
                     q_feat,
-                    [num_regions, self.num_answers, 1])
+                    [self.num_regions, self.num_answers, 1])
 
                 a_feat = tf.expand_dims(
                     self.answers_vert_concat[j],
@@ -72,7 +74,7 @@ class AnswerInference():
 
                 a_feat = tf.tile(
                     a_feat,
-                    [num_regions, 1, 1])
+                    [self.num_regions, 1, 1])
 
                 obj_atr_qa_feat = tf.concat(
                     2,
@@ -89,9 +91,14 @@ class AnswerInference():
                     1,
                     2500,
                     'per_region_ans_score_conv_1',
-                    func = tf.nn.relu,
+                    func = None,
                     reuse_vars = reuse_vars)
 
+                self.per_region_answer_scores[j] = tf.nn.relu(
+                    self.batch_norm(
+                        self.per_region_answer_scores[j],
+                        self.is_training))                
+
                 self.per_region_answer_scores[j] = layers.conv2d(
                     self.per_region_answer_scores[j],
                     1,
@@ -123,93 +130,80 @@ class AnswerInference():
     def inner_product_selection(self, obj_feat, atr_feat, noun_embed, adjective_embed):
         feats = []
         for k in xrange(18):
-            scores = tf.matmul(
-                obj_feat,
-                tf.transpose(noun_embed[k]))
+            scores = tf.matmul(obj_feat, tf.transpose(noun_embed[k]))
             scores1 = tf.nn.softmax(scores)
-            feat1 = tf.matmul(
-                scores1,
-                noun_embed[k])
+            feat1 = tf.matmul(scores1, noun_embed[k])
             
-            scores = tf.matmul(
-                atr_feat,
-                tf.transpose(adjective_embed[k]))
+            scores = tf.matmul(atr_feat, tf.transpose(adjective_embed[k]))
             scores2 = tf.nn.softmax(scores)
-            feat2 = tf.matmul(
-                scores2,
-                adjective_embed[k])
-        
-            feat = tf.concat(
-                1,
-                [feat1, feat2])
+            feat2 = tf.matmul(scores2, adjective_embed[k])
+
+            scores1_ = tf.matmul(obj_feat, tf.transpose(feat1))
+            scores2_ = tf.matmul(atr_feat, tf.transpose(feat2))
 
-            # feat = tf.tile(
-            #     tf.expand_dims(feat,1),
-            #     [1,self.num_answers,1])
+            feat = tf.concat(1, [feat1, feat2, scores1_, scores2_])
 
             feats.append(feat)
 
-        feats = tf.pack(feats)
-        feats = tf.transpose(
-            feats,
-            [1,0,2])
+        feats = tf.transpose(tf.pack(feats), [1,0,2])
+
         return feats
                 
-    def elementwise_product(self, obj_feat, atr_feat, ques_feat, ans_feat):
-        tiled_ques = tf.tile(tf.reshape(ques_feat,[1, -1]),[self.num_answers,1])
-        qa_feat = tf.concat(
-            1,
-            [tiled_ques, ans_feat])
-        qa_feat = tf.tile(qa_feat, [1,2]) 
+    # def elementwise_product(self, obj_feat, atr_feat, ques_feat, ans_feat):
+    #     tiled_ques = tf.tile(tf.reshape(ques_feat,[1, -1]),[self.num_answers,1])
+    #     qa_feat = tf.concat(
+    #         1,
+    #         [tiled_ques, ans_feat])
+    #     qa_feat = tf.tile(qa_feat, [1,2]) 
         
-        obj_atr_feat = tf.concat(
-            1,
-            [obj_feat, atr_feat])
-        obj_atr_feat = tf.tile(obj_atr_feat, [1,5])
-        obj_atr_feat = tf.expand_dims(obj_atr_feat,1)
+    #     obj_atr_feat = tf.concat(
+    #         1,
+    #         [obj_feat, atr_feat])
+    #     obj_atr_feat = tf.tile(obj_atr_feat, [1,5])
+    #     obj_atr_feat = tf.expand_dims(obj_atr_feat,1)
         
-        feat = obj_atr_feat*qa_feat
+    #     feat = obj_atr_feat*qa_feat
         
-        return feat
+    #     return feat
                 
             
 
-    def concat_object_attribute(self, object_feat, attribute_feat):
-        object_attribute = tf.concat(
-            1,
-            [object_feat, attribute_feat])
+    # def concat_object_attribute(self, object_feat, attribute_feat):
+    #     object_attribute = tf.concat(
+    #         1,
+    #         [object_feat, attribute_feat])
 
-        return object_attribute
+    #     return object_attribute
 
 
-    def project_question_answer(self, question, answer, space_dim, scope_name, reuse):
-        with tf.variable_scope(scope_name, reuse=reuse):
-            question_replicated = tf.tile(
-                question,
-                [self.num_answers, 1],
-                name='replicate_questions')
-            qa = tf.concat(
-                1,
-                [question_replicated, answer])
+    # def project_question_answer(self, question, answer, space_dim, scope_name, reuse):
+    #     with tf.variable_scope(scope_name, reuse=reuse):
+    #         question_replicated = tf.tile(
+    #             question,
+    #             [self.num_answers, 1],
+    #             name='replicate_questions')
+    #         qa = tf.concat(
+    #             1,
+    #             [question_replicated, answer])
                 
-            qa_proj1 = tf.nn.dropout(layers.full(qa, 1000, 'fc1'), self.keep_prob)
-            qa_proj2 = tf.nn.dropout(layers.full(qa_proj1, space_dim, 'fc2'), self.keep_prob)
+    #         qa_proj1 = tf.nn.dropout(layers.full(qa, 1000, 'fc1'), self.keep_prob)
+    #         qa_proj2 = tf.nn.dropout(layers.full(qa_proj1, space_dim, 'fc2'), self.keep_prob)
 
-            # qa_proj1 = layers.full(qa, 1000, 'fc1')
-            # qa_proj2 = layers.full(qa_proj1, space_dim, 'fc2')
+    #         # qa_proj1 = layers.full(qa, 1000, 'fc1')
+    #         # qa_proj2 = layers.full(qa_proj1, space_dim, 'fc2')
 
-        return qa_proj2
+    #     return qa_proj2
                 
-    def project_object_attribute(self, object_feat, attribute_feat, space_dim, scope_name, reuse):
-        with tf.variable_scope(scope_name, reuse=reuse):
-            obj_atr_feat = tf.concat(
-                1,
-                [object_feat, attribute_feat],
-                name='object_attribute_concat')
-
-            obj_atr_proj1 = tf.nn.dropout(layers.full(obj_atr_feat, 600, 'fc1'), self.keep_prob)
-            obj_atr_proj2 = tf.nn.dropout(layers.full(obj_atr_proj1, space_dim, 'fc2'), self.keep_prob)
-
-            # obj_atr_proj1 = layers.full(obj_atr_feat, 600, 'fc1')
-            # obj_atr_proj2 = layers.full(obj_atr_proj1, space_dim, 'fc2')
-        return obj_atr_proj2
+    # def project_object_attribute(self, object_feat, attribute_feat, space_dim, scope_name, reuse):
+    #     with tf.variable_scope(scope_name, reuse=reuse):
+    #         obj_atr_feat = tf.concat(
+    #             1,
+    #             [object_feat, attribute_feat],
+    #             name='object_attribute_concat')
+
+    #         obj_atr_proj1 = tf.nn.dropout(layers.full(obj_atr_feat, 600, 'fc1'), self.keep_prob)
+    #         obj_atr_proj2 = tf.nn.dropout(layers.full(obj_atr_proj1, space_dim, 'fc2'), self.keep_prob)
+
+    #         # obj_atr_proj1 = layers.full(obj_atr_feat, 600, 'fc1')
+    #         # obj_atr_proj2 = layers.full(obj_atr_proj1, space_dim, 'fc2')
+    #     return obj_atr_proj2
diff --git a/answer_classifier_cached_features/train.py b/answer_classifier_cached_features/train.py
index 968ee77332d0f2c6cd0c95741cc3e21940edfe62..14f7f4e015a7cfe676840881b0692ebabe39e65d 100644
--- a/answer_classifier_cached_features/train.py
+++ b/answer_classifier_cached_features/train.py
@@ -107,7 +107,8 @@ class graph_creator():
                 self.adjective_embed,
                 self.num_neg_answers + 1,
                 self.space_dim,
-                self.plh['keep_prob'])
+                self.plh['keep_prob'],
+                training)
 
             self.add_losses()
             self.add_accuracy_computation()