diff --git a/answer_classifier_cached_features/select_best_model.py b/answer_classifier_cached_features/select_best_model.py
index 385c21f3cb8a5fa91b0372932d27528c0944a9fc..038b5ab630c3d1f98e027abbafa0fd930e9601c7 100644
--- a/answer_classifier_cached_features/select_best_model.py
+++ b/answer_classifier_cached_features/select_best_model.py
@@ -237,13 +237,14 @@ def eval_model(model_to_eval, results_json):
         constants.answer_batch_size,
         constants.image_size,
         constants.num_negative_answers,
-        constants.answer_embedding_dim,
         constants.answer_regularization_coeff,
         constants.answer_batch_size*constants.num_region_proposals,
         0,
         0,
         0,
         constants.answer_obj_atr_loss_wt,
+        constants.answer_ans_loss_wt,
+        constants.answer_mil_loss_wt,
         resnet_feat_dim=constants.resnet_feat_dim,
         training=False)
 
diff --git a/object_attribute_classifier_cached_features/select_best_model.py b/object_attribute_classifier_cached_features/select_best_model.py
index f250ea7f58989017f9e324c9e7d35c2e46214b37..51f400c340bcf4ae989ddf35aeca027375f19615 100644
--- a/object_attribute_classifier_cached_features/select_best_model.py
+++ b/object_attribute_classifier_cached_features/select_best_model.py
@@ -93,9 +93,13 @@ class eval_mgr():
              labels):
         self.num_iter += 1.0
        
-        self.eval_object_accuracy(
+        # self.eval_object_accuracy(
+        #     eval_vars_dict['object_prob'],
+        #     labels['objects'])
+        self.top_k_accuracy(
             eval_vars_dict['object_prob'],
-            labels['objects'])
+            labels['objects'],
+            5)
 
         self.eval_attribute_pr(
             eval_vars_dict['attribute_prob'],
@@ -129,6 +133,25 @@ class eval_mgr():
             with open(filename, 'w') as file:
                 ujson.dump(self.labels_dict[i], file, indent=4)
                 
+    def top_k_accuracy(
+            self,
+            prob,
+            labels,
+            k):
+
+        num_samples = prob.shape[0]
+        # per-sample recall of ground-truth labels within the top-k predictions
+        accuracy = 0.0
+        for i in xrange(num_samples):
+            gt_ids = set(np.where(labels[i,:]>0.5)[0].tolist())
+            top_k = set(np.argsort(prob[i,:]).tolist()[-1:-1-k:-1])
+            count = 0.0
+            for idx in gt_ids:
+                if idx in top_k:
+                    count += 1.0
+            accuracy += count/max(len(gt_ids),1)
+        self.object_accuracy += accuracy/num_samples
+
     def eval_object_accuracy(
             self,
             prob,
@@ -316,7 +339,8 @@ def model_path_generator(models_dir, start_model, step_size):
 
 if __name__=='__main__':
     model_paths = model_path_generator(
-        constants.region_output_dir,
+        constants.answer_output_dir,
+        # constants.region_output_dir,
         constants.region_start_model,
         constants.region_step_size)