Commit cc6e5311 authored by lianhuiwang, committed by Andrew Or

[SPARK-5653][YARN] In ApplicationMaster rename isDriver to isClusterMode

In ApplicationMaster, rename isDriver to isClusterMode. Client already uses isClusterMode, so ApplicationMaster should stay consistent with it and use isClusterMode as well. isClusterMode is also easier to understand.
andrewor14 sryza

Author: lianhuiwang <lianhuiwang09@gmail.com>

Closes #4430 from lianhuiwang/am-isDriver-rename and squashes the following commits:

f9f3ed0 [lianhuiwang] rename isDriver to isClusterMode
parent 9ad56ad2
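
For orientation before the diff: a minimal, hypothetical sketch of the convention this rename aligns with (only isClusterMode and the args.userClass test mirror the real ApplicationMaster; every other name here is invented). In yarn-cluster mode the user's main class is shipped to and run inside the ApplicationMaster, so the presence of a user class in the AM arguments is what distinguishes the two deploy modes:

// Hypothetical simplification, not the committed code.
object DeployModeSketch {
  final case class AMArguments(userClass: String) // null when launched in yarn-client mode

  class ApplicationMasterSketch(args: AMArguments) {
    // Renamed from `isDriver`: matches Client's `isClusterMode`, and reads as
    // "the AM hosts the driver" rather than "the AM is the driver".
    private val isClusterMode = args.userClass != null

    def run(): String =
      if (isClusterMode) "cluster mode: run the user class (the driver) inside the AM"
      else "client mode: only launch executors; the driver runs outside YARN"
  }

  def main(argv: Array[String]): Unit = {
    println(new ApplicationMasterSketch(AMArguments("org.example.MyApp")).run())
    println(new ApplicationMasterSketch(AMArguments(null)).run())
  }
}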
@@ -54,7 +54,7 @@ private[spark] class ApplicationMaster(
   private val sparkConf = new SparkConf()
   private val yarnConf: YarnConfiguration = SparkHadoopUtil.get.newConfiguration(sparkConf)
     .asInstanceOf[YarnConfiguration]
-  private val isDriver = args.userClass != null
+  private val isClusterMode = args.userClass != null

   // Default to numExecutors * 2, with minimum of 3
   private val maxNumExecutorFailures = sparkConf.getInt("spark.yarn.max.executor.failures",
@@ -81,7 +81,7 @@ private[spark] class ApplicationMaster(
     try {
       val appAttemptId = client.getAttemptId()

-      if (isDriver) {
+      if (isClusterMode) {
         // Set the web ui port to be ephemeral for yarn so we don't conflict with
         // other spark processes running on the same box
         System.setProperty("spark.ui.port", "0")
@@ -139,7 +139,7 @@ private[spark] class ApplicationMaster(
       // doAs in order for the credentials to be passed on to the executor containers.
       val securityMgr = new SecurityManager(sparkConf)

-      if (isDriver) {
+      if (isClusterMode) {
         runDriver(securityMgr)
       } else {
         runExecutorLauncher(securityMgr)
@@ -162,7 +162,7 @@ private[spark] class ApplicationMaster(
    * from the application code.
    */
   final def getDefaultFinalStatus() = {
-    if (isDriver) {
+    if (isClusterMode) {
       FinalApplicationStatus.SUCCEEDED
     } else {
       FinalApplicationStatus.UNDEFINED
@@ -243,7 +243,7 @@ private[spark] class ApplicationMaster(
   private def runAMActor(
       host: String,
       port: String,
-      isDriver: Boolean): Unit = {
+      isClusterMode: Boolean): Unit = {
     val driverUrl = AkkaUtils.address(
       AkkaUtils.protocol(actorSystem),
@@ -251,7 +251,7 @@ private[spark] class ApplicationMaster(
       host,
       port,
       YarnSchedulerBackend.ACTOR_NAME)
-    actor = actorSystem.actorOf(Props(new AMActor(driverUrl, isDriver)), name = "YarnAM")
+    actor = actorSystem.actorOf(Props(new AMActor(driverUrl, isClusterMode)), name = "YarnAM")
   }

   private def runDriver(securityMgr: SecurityManager): Unit = {
@@ -272,7 +272,7 @@ private[spark] class ApplicationMaster(
       runAMActor(
         sc.getConf.get("spark.driver.host"),
         sc.getConf.get("spark.driver.port"),
-        isDriver = true)
+        isClusterMode = true)
       registerAM(sc.ui.map(_.appUIAddress).getOrElse(""), securityMgr)
       userClassThread.join()
     }
@@ -427,7 +427,7 @@ private[spark] class ApplicationMaster(
     sparkConf.set("spark.driver.host", driverHost)
     sparkConf.set("spark.driver.port", driverPort.toString)

-    runAMActor(driverHost, driverPort.toString, isDriver = false)
+    runAMActor(driverHost, driverPort.toString, isClusterMode = false)
   }

   /** Add the Yarn IP filter that is required for properly securing the UI. */
@@ -435,7 +435,7 @@ private[spark] class ApplicationMaster(
     val proxyBase = System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV)
     val amFilter = "org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter"
     val params = client.getAmIpFilterParams(yarnConf, proxyBase)
-    if (isDriver) {
+    if (isClusterMode) {
       System.setProperty("spark.ui.filters", amFilter)
       params.foreach { case (k, v) => System.setProperty(s"spark.$amFilter.param.$k", v) }
     } else {
@@ -491,7 +491,7 @@ private[spark] class ApplicationMaster(
   /**
    * An actor that communicates with the driver's scheduler backend.
    */
-  private class AMActor(driverUrl: String, isDriver: Boolean) extends Actor {
+  private class AMActor(driverUrl: String, isClusterMode: Boolean) extends Actor {
     var driver: ActorSelection = _

     override def preStart() = {
@@ -503,7 +503,7 @@ private[spark] class ApplicationMaster(
       driver ! RegisterClusterManager
       // In cluster mode, the AM can directly monitor the driver status instead
       // of trying to deduce it from the lifecycle of the driver's actor
-      if (!isDriver) {
+      if (!isClusterMode) {
         context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
       }
     }
@@ -513,7 +513,7 @@ private[spark] class ApplicationMaster(
       case x: DisassociatedEvent =>
         logInfo(s"Driver terminated or disconnected! Shutting down. $x")
         // In cluster mode, do not rely on the disassociated event to exit
         // This avoids potentially reporting incorrect exit codes if the driver fails
-        if (!isDriver) {
+        if (!isClusterMode) {
           finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
         }
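
A usage note on getDefaultFinalStatus() above, as a toy, self-contained illustration (invented names, not Spark's API): in cluster mode the AM ran the user class itself, so reaching a normal exit implies success, while in client mode the driver's outcome is not visible to the AM, hence UNDEFINED.

object FinalStatusSketch {
  sealed trait FinalStatus
  case object Succeeded extends FinalStatus // cluster mode: the AM observed the user app finish
  case object Undefined extends FinalStatus // client mode: the driver's fate is outside the AM's view

  def defaultFinalStatus(isClusterMode: Boolean): FinalStatus =
    if (isClusterMode) Succeeded else Undefined

  def main(args: Array[String]): Unit = {
    println(defaultFinalStatus(isClusterMode = true))  // Succeeded
    println(defaultFinalStatus(isClusterMode = false)) // Undefined
  }
}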