diff --git a/core/src/main/scala/org/apache/spark/NewAccumulator.scala b/core/src/main/scala/org/apache/spark/NewAccumulator.scala
index edb9b741a87123dcb637c0f6e42bdcc74a499752..aa21ccc1ff3d99e381a5410ce7b39ae3576ca458 100644
--- a/core/src/main/scala/org/apache/spark/NewAccumulator.scala
+++ b/core/src/main/scala/org/apache/spark/NewAccumulator.scala
@@ -22,6 +22,8 @@ import java.io.ObjectInputStream
 import java.util.concurrent.atomic.AtomicLong
 import javax.annotation.concurrent.GuardedBy
 
+import scala.collection.JavaConverters._
+
 import org.apache.spark.scheduler.AccumulableInfo
 import org.apache.spark.util.Utils
 
@@ -57,7 +59,7 @@ abstract class NewAccumulator[IN, OUT] extends Serializable {
    * registered before use, or an exception will be thrown.
    */
   final def isRegistered: Boolean =
-    metadata != null && AccumulatorContext.originals.containsKey(metadata.id)
+    metadata != null && AccumulatorContext.get(metadata.id).isDefined
 
   private def assertMetadataNotNull(): Unit = {
     if (metadata == null) {
@@ -197,7 +199,7 @@ private[spark] object AccumulatorContext {
    * TODO: Don't use a global map; these should be tied to a SparkContext (SPARK-13051).
    */
   @GuardedBy("AccumulatorContext")
-  val originals = new java.util.HashMap[Long, jl.ref.WeakReference[NewAccumulator[_, _]]]
+  private val originals = new java.util.HashMap[Long, jl.ref.WeakReference[NewAccumulator[_, _]]]
 
   private[this] val nextId = new AtomicLong(0L)
 
@@ -207,6 +209,12 @@
    */
   def newId(): Long = nextId.getAndIncrement
 
+  /** Returns the number of accumulators currently registered. */
+  def numAccums: Int = synchronized(originals.size)
+
+  /** Returns the ids of all currently registered accumulators, as an immutable snapshot. */
+  def accumIds: Set[Long] = synchronized(originals.keySet().asScala.toSet)
+
   /**
    * Register an [[Accumulator]] created on the driver such that it can be used on the executors.
    *
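
The hunk above makes the backing `originals` map private and exposes `numAccums` and `accumIds` as synchronized, read-only views. A minimal, self-contained sketch of that encapsulation pattern, using a hypothetical `RegistrySketch` object in place of the real `AccumulatorContext` (illustrative only, not the actual Spark source):

```scala
import java.util.concurrent.atomic.AtomicLong

import scala.collection.JavaConverters._

// Hypothetical, simplified registry: the backing map stays private and every
// read goes through an accessor synchronized on this object.
object RegistrySketch {
  private val originals =
    new java.util.HashMap[Long, java.lang.ref.WeakReference[AnyRef]]

  private[this] val nextId = new AtomicLong(0L)

  def newId(): Long = nextId.getAndIncrement()

  def register(id: Long, acc: AnyRef): Unit = synchronized {
    if (!originals.containsKey(id)) {
      originals.put(id, new java.lang.ref.WeakReference[AnyRef](acc))
    }
  }

  // Looks up a live entry; None if it was never registered or has been collected.
  def get(id: Long): Option[AnyRef] =
    synchronized(Option(originals.get(id)).flatMap(ref => Option(ref.get)))

  // Read-only views that replace direct access to `originals`:
  def numAccums: Int = synchronized(originals.size)
  def accumIds: Set[Long] = synchronized(originals.keySet().asScala.toSet)
}
```

Callers (including the tests below) then depend only on `get`, `numAccums`, and `accumIds`, so the map's representation and locking can change without touching call sites.
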
diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 5f97e58845d7a0577d09f969ece4c0cd19e5d67a..9c90049715ddc221000724fc6ec19fe051092810 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -191,7 +191,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex
     assert(ref.get.isEmpty)
 
     AccumulatorContext.remove(accId)
-    assert(!AccumulatorContext.originals.containsKey(accId))
+    assert(AccumulatorContext.get(accId).isEmpty)
   }
 
   test("get accum") {
diff --git a/core/src/test/scala/org/apache/spark/InternalAccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/InternalAccumulatorSuite.scala
index e4474bb813d5e39ee323394384549d98ae23975b..972e31c4114f1aa19a4aa869ace2bf63a06f3c14 100644
--- a/core/src/test/scala/org/apache/spark/InternalAccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/InternalAccumulatorSuite.scala
@@ -183,18 +183,18 @@ class InternalAccumulatorSuite extends SparkFunSuite with LocalSparkContext {
       private val myCleaner = new SaveAccumContextCleaner(this)
       override def cleaner: Option[ContextCleaner] = Some(myCleaner)
     }
-    assert(AccumulatorContext.originals.isEmpty)
+    assert(AccumulatorContext.numAccums == 0)
     sc.parallelize(1 to 100).map { i => (i, i) }.reduceByKey { _ + _ }.count()
     val numInternalAccums = TaskMetrics.empty.internalAccums.length
     // We ran 2 stages, so we should have 2 sets of internal accumulators, 1 for each stage
-    assert(AccumulatorContext.originals.size === numInternalAccums * 2)
+    assert(AccumulatorContext.numAccums === numInternalAccums * 2)
     val accumsRegistered = sc.cleaner match {
       case Some(cleaner: SaveAccumContextCleaner) => cleaner.accumsRegisteredForCleanup
       case _ => Seq.empty[Long]
     }
     // Make sure the same set of accumulators is registered for cleanup
     assert(accumsRegistered.size === numInternalAccums * 2)
-    assert(accumsRegistered.toSet === AccumulatorContext.originals.keySet().asScala)
+    assert(accumsRegistered.toSet === AccumulatorContext.accumIds)
   }
 
   /**
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 0e6356b5781e6838fff7c179a328aa347b38536d..1095a73c58291b7768bfc4ff28e479c7c2c942c4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -334,10 +334,10 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
     sql("SELECT * FROM t2").count()
 
     AccumulatorContext.synchronized {
-      val accsSize = AccumulatorContext.originals.size
+      val accsSize = AccumulatorContext.numAccums
       sqlContext.uncacheTable("t1")
       sqlContext.uncacheTable("t2")
-      assert((accsSize - 2) == AccumulatorContext.originals.size)
+      assert((accsSize - 2) == AccumulatorContext.numAccums)
     }
   }
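
The `CachedTableSuite` hunk keeps the explicit `AccumulatorContext.synchronized` block around the before/after comparison even though `numAccums` now synchronizes internally. That nesting is safe because JVM monitors are reentrant; a standalone sketch of the same shape, using a hypothetical `ReentrancyDemo` object (illustrative only, not from the patch):

```scala
// JVM monitors are reentrant: a thread that already holds an object's lock may
// enter other synchronized blocks on the same object without deadlocking.
object ReentrancyDemo {
  private var count = 0

  def size: Int = synchronized(count)            // acquires this object's monitor
  def add(): Unit = synchronized { count += 1 }  // acquires the same monitor

  def main(args: Array[String]): Unit = {
    ReentrancyDemo.synchronized {                // outer lock on the same monitor
      val before = ReentrancyDemo.size           // re-enters the held lock
      ReentrancyDemo.add()
      assert(ReentrancyDemo.size == before + 1)  // both reads observe a consistent count
    }
  }
}
```

Holding the outer lock across both `numAccums` reads keeps other threads from registering or removing accumulators in between, which helps make the `accsSize - 2` comparison deterministic.
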