Commit f231aaa2 authored by tgravescs

move the hadoopJobMetadata back into SparkEnv

parent 54d9c6f2
@@ -31,6 +31,7 @@ import org.apache.spark.serializer.{Serializer, SerializerManager}
 import org.apache.spark.util.{Utils, AkkaUtils}
 import org.apache.spark.api.python.PythonWorkerFactory

+import com.google.common.collect.MapMaker

 /**
  * Holds all the runtime environment objects for a running Spark instance (either master or worker),
@@ -57,6 +58,10 @@ class SparkEnv (
   private val pythonWorkers = mutable.HashMap[(String, Map[String, String]), PythonWorkerFactory]()

+  // A general, soft-reference map for metadata needed during HadoopRDD split computation
+  // (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats).
+  private[spark] val hadoopJobMetadata = new MapMaker().softValues().makeMap[String, Any]()
+
   def stop() {
     pythonWorkers.foreach { case(key, worker) => worker.stop() }
     httpFileServer.stop()
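
The map added above is a Guava soft-valued ConcurrentMap: the garbage collector may reclaim entries under memory pressure, so cached JobConfs and InputFormats never pin memory permanently. A minimal standalone sketch of the same pattern follows (object and method names are hypothetical; note that MapMaker.softValues(), as used in the diff, exists only in older Guava releases, with CacheBuilder.newBuilder().softValues() as the later equivalent):

import java.util.concurrent.ConcurrentMap

import com.google.common.collect.MapMaker

object SoftCacheSketch {
  // Soft values may be garbage-collected under memory pressure, so the cache
  // can shrink on its own instead of growing without bound.
  val metadata: ConcurrentMap[String, Any] =
    new MapMaker().softValues().makeMap[String, Any]()

  // Get-or-compute: a miss can mean "never cached" or "already collected";
  // either way, recompute and re-publish the value.
  def getOrCompute(key: String)(compute: => Any): Any = {
    val cached = metadata.get(key)
    if (cached != null) cached
    else {
      val value = compute
      metadata.put(key, value)
      value
    }
  }
}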
@@ -20,19 +20,13 @@ package org.apache.spark.deploy
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.mapred.JobConf

-import com.google.common.collect.MapMaker
-
 import org.apache.spark.SparkException

 /**
  * Contains util methods to interact with Hadoop from Spark.
  */
 private[spark]
 class SparkHadoopUtil {

-  // A general, soft-reference map for metadata needed during HadoopRDD split computation
-  // (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats).
-  private[spark] val hadoopJobMetadata = new MapMaker().softValues().makeMap[String, Any]()

   /**
    * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
@@ -47,7 +41,6 @@ class SparkHadoopUtil {
   def addCredentials(conf: JobConf) {}

   def isYarnMode(): Boolean = { false }
-
 }

 object SparkHadoopUtil {
@@ -199,10 +199,10 @@ private[spark] object HadoopRDD {
   * The three methods below are helpers for accessing the local map, a property of the SparkEnv of
   * the local process.
   */
-  def getCachedMetadata(key: String) = SparkHadoopUtil.get.hadoopJobMetadata.get(key)
+  def getCachedMetadata(key: String) = SparkEnv.get.hadoopJobMetadata.get(key)

-  def containsCachedMetadata(key: String) = SparkHadoopUtil.get.hadoopJobMetadata.containsKey(key)
+  def containsCachedMetadata(key: String) = SparkEnv.get.hadoopJobMetadata.containsKey(key)

-  def putCachedMetadata(key: String, value: Any) =
-    SparkHadoopUtil.get.hadoopJobMetadata.put(key, value)
+  def putCachedMetadata(key: String, value: Any) =
+    SparkEnv.get.hadoopJobMetadata.put(key, value)
 }
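
With the helpers now reading through SparkEnv.get, a caller would follow a check-then-put pattern along these lines. This is a hypothetical sketch, not the commit's actual call site, and it would have to live inside the Spark source tree because the helpers are private[spark]. One caveat worth noting: with soft values, an entry can be collected between containsCachedMetadata and getCachedMetadata, so a null check on the get result is the more defensive variant of this pattern.

// Hypothetical caller; placed under org.apache.spark.rdd so it can see the
// private[spark] cache helpers on the HadoopRDD companion object.
package org.apache.spark.rdd

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf

object JobConfCacheSketch {
  // Reuse a process-wide cached JobConf when one is present; otherwise build it
  // from the job's Configuration and publish it for later tasks in this JVM.
  def getOrCreateJobConf(cacheKey: String, conf: Configuration): JobConf = {
    if (HadoopRDD.containsCachedMetadata(cacheKey)) {
      HadoopRDD.getCachedMetadata(cacheKey).asInstanceOf[JobConf]
    } else {
      val newJobConf = new JobConf(conf)
      HadoopRDD.putCachedMetadata(cacheKey, newJobConf)
      newJobConf
    }
  }
}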