diff --git a/object_attribute_classifier_cached_features/eval.py b/object_attribute_classifier_cached_features/eval.py
index 228f0d3920c968246f6d4c341758fcaef33e4952..9766cb2ee960448d4082d723fde4e9910edeb090 100644
--- a/object_attribute_classifier_cached_features/eval.py
+++ b/object_attribute_classifier_cached_features/eval.py
@@ -153,15 +153,11 @@ class eval_mgr():
                 region_ids,
                 region_paths)
 
-        if iter%500 == 0:
-            self.write_scores()
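+        # Periodic score dumps are disabled; write_scores() is now called
+        # once after the eval loop finishes (see eval() below).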
+        # if iter%500 == 0:
+        #     self.write_scores()
 
-            filename = os.path.join(
-                self.object_scores_dirname,
-                'object_predictions.json')
-
-            with open(filename,'w') as file:
-                ujson.dump(self.obj_pred_json,file,indent=4)
 
     def save_image_pred(
             self,
@@ -231,6 +227,14 @@ class eval_mgr():
                 labels[:,self.attribute_ids[i]].tolist())
 
     def write_scores(self):
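+        # Dump the accumulated object predictions to a single JSON file.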
+        filename = os.path.join(
+            self.object_scores_dirname,
+            'object_predictions.json')
+
+        with open(filename, 'w') as f:
+            ujson.dump(self.obj_pred_json, f, indent=4)
+
         for i in xrange(10):
             filename = os.path.join(
                 self.attribute_scores_dirname,
@@ -384,7 +388,9 @@ def eval(
             # print 'Fall_out: {}'.format(evaluator.get_fall_out())
             # print 'AP: {}'.format(evaluator.get_ap())
             iter+=1
-
+
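+        # Write all accumulated scores once, after the eval loop completes.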
+        evaluator.write_scores()
         # print 'Object accuracy: {}'.format(evaluator.get_object_accuracy())
         # print 'Recall: {}'.format(evaluator.get_recall())
         # print 'Precision: {}'.format(evaluator.get_precision())