Commit 2c421749 authored by Matei Zaharia

Address review comments

parent 044c8ad3
@@ -19,7 +19,7 @@ package org.apache.spark.deploy
 private[spark] class ApplicationDescription(
     val name: String,
-    val maxCores: Int, /* Integer.MAX_VALUE denotes an unlimited number of cores */
+    val maxCores: Option[Int],
     val memoryPerSlave: Int,
     val command: Command,
     val sparkHome: String,
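For illustration, a minimal sketch (not part of the commit) of what the Option-valued maxCores means to callers: Some(n) requests an explicit core cap, while None leaves the cap to the cluster's default. All names and values below are placeholders.

    import org.apache.spark.deploy.{ApplicationDescription, Command}

    // Hypothetical callers of the changed constructor; every argument other
    // than maxCores (memory, command, sparkHome, appUiUrl) is a dummy value.
    val capped = new ApplicationDescription(
      "demo-app", Some(4), 512, Command("demo.Main", Seq(), Map()), "/opt/spark", "appUiUrl")
    val uncapped = new ApplicationDescription(
      "demo-app", None, 512, Command("demo.Main", Seq(), Map()), "/opt/spark", "appUiUrl")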
@@ -48,7 +48,7 @@ private[spark] object TestClient {
     val (actorSystem, port) = AkkaUtils.createActorSystem("spark", Utils.localIpAddress, 0,
       conf = new SparkConf)
     val desc = new ApplicationDescription(
-      "TestClient", 1, 512, Command("spark.deploy.client.TestExecutor", Seq(), Map()),
+      "TestClient", Some(1), 512, Command("spark.deploy.client.TestExecutor", Seq(), Map()),
       "dummy-spark-home", "ignored")
     val listener = new TestListener
     val client = new Client(actorSystem, Array(url), desc, listener, new SparkConf)
@@ -82,7 +82,7 @@ private[spark] class ApplicationInfo(
     }
   }
 
-  private val myMaxCores = if (desc.maxCores == Int.MaxValue) defaultCores else desc.maxCores
+  private val myMaxCores = desc.maxCores.getOrElse(defaultCores)
 
   def coresLeft: Int = myMaxCores - coresGranted
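A quick note on the fallback semantics introduced here (illustrative only; the value 16 is an assumed master-side default, not from the patch):

    val defaultCores = 16                          // assumed master-side default
    Some(4).getOrElse(defaultCores)                // => 4: the application's explicit cap wins
    (None: Option[Int]).getOrElse(defaultCores)    // => 16: fall back to the master default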
@@ -92,6 +92,9 @@ private[spark] class Master(host: String, port: Int, webUiPort: Int) extends Actor
   // Default maxCores for applications that don't specify it (i.e. pass Int.MaxValue)
   val defaultCores = conf.getInt("spark.deploy.defaultCores", Int.MaxValue)
+  if (defaultCores < 1) {
+    throw new SparkException("spark.deploy.defaultCores must be positive")
+  }
 
   override def preStart() {
     logInfo("Starting Spark master at " + masterUrl)
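A hedged sketch of the behaviour being added, outside the Master's actual startup path: an unset spark.deploy.defaultCores still behaves as "unlimited", while a non-positive value is rejected up front (the sketch uses require rather than the SparkException thrown in Master).

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
    // Unset property falls back to Int.MaxValue, i.e. effectively no limit.
    val defaultCores = conf.getInt("spark.deploy.defaultCores", Int.MaxValue)
    // Mirrors the new check in Master: misconfiguration fails fast.
    require(defaultCores >= 1, "spark.deploy.defaultCores must be positive")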
@@ -38,7 +38,7 @@ private[spark] class SparkDeploySchedulerBackend(
   var stopping = false
   var shutdownCallback : (SparkDeploySchedulerBackend) => Unit = _
-  val maxCores = conf.get("spark.cores.max", Int.MaxValue.toString).toInt
+  val maxCores = conf.getOption("spark.cores.max").map(_.toInt)
 
   override def start() {
     super.start()
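For reference, a small illustration (not from the patch) of what the getOption-based lookup yields with and without spark.cores.max set:

    import org.apache.spark.SparkConf

    val withCap = new SparkConf().set("spark.cores.max", "8")
    withCap.getOption("spark.cores.max").map(_.toInt)   // => Some(8)

    val noCap = new SparkConf()
    noCap.getOption("spark.cores.max").map(_.toInt)     // => None (no sentinel value needed)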
@@ -70,7 +70,7 @@ class JsonProtocolSuite extends FunSuite {
   def createAppDesc() : ApplicationDescription = {
     val cmd = new Command("mainClass", List("arg1", "arg2"), Map())
-    new ApplicationDescription("name", 4, 1234, cmd, "sparkHome", "appUiUrl")
+    new ApplicationDescription("name", Some(4), 1234, cmd, "sparkHome", "appUiUrl")
   }
 
   def createAppInfo() : ApplicationInfo = {
     new ApplicationInfo(
@@ -27,7 +27,7 @@ class ExecutorRunnerTest extends FunSuite {
   test("command includes appId") {
     def f(s:String) = new File(s)
     val sparkHome = sys.env.get("SPARK_HOME").orElse(sys.props.get("spark.home")).get
-    val appDesc = new ApplicationDescription("app name", 8, 500, Command("foo", Seq(),Map()),
+    val appDesc = new ApplicationDescription("app name", Some(8), 500, Command("foo", Seq(),Map()),
       sparkHome, "appUiUrl")
     val appId = "12345-worker321-9876"
     val er = new ExecutorRunner(appId, 1, appDesc, 8, 500, null, "blah", "worker321", f(sparkHome),
@@ -418,7 +418,7 @@ Apart from these, the following properties are also available, and may be useful
     Whether the standalone cluster manager should spread applications out across nodes or try
     to consolidate them onto as few nodes as possible. Spreading out is usually better for
     data locality in HDFS, but consolidating is more efficient for compute-intensive workloads. <br/>
-    <b>Note:</b> this setting needs to be configured in the cluster master, not in individual
+    <b>Note:</b> this setting needs to be configured in the standalone cluster master, not in individual
     applications; you can set it through <code>SPARK_JAVA_OPTS</code> in <code>spark-env.sh</code>.
   </td>
 </tr>
@@ -431,7 +431,7 @@ Apart from these, the following properties are also available, and may be useful
     cores unless they configure <code>spark.cores.max</code> themselves.
     Set this lower on a shared cluster to prevent users from grabbing
     the whole cluster by default. <br/>
-    <b>Note:</b> this setting needs to be configured in the cluster master, not in individual
+    <b>Note:</b> this setting needs to be configured in the standalone cluster master, not in individual
     applications; you can set it through <code>SPARK_JAVA_OPTS</code> in <code>spark-env.sh</code>.
   </td>
 </tr>
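To make the interaction concrete, a hypothetical application-side configuration (the master URL and app name are placeholders): a job that sets spark.cores.max explicitly is unaffected by the master's spark.deploy.defaultCores, while a job that omits it receives the master's default.

    import org.apache.spark.{SparkConf, SparkContext}

    val conf = new SparkConf()
      .setMaster("spark://master-host:7077")   // placeholder master URL
      .setAppName("DemoApp")
      .set("spark.cores.max", "4")             // explicit cap; omit to inherit spark.deploy.defaultCores
    val sc = new SparkContext(conf)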