Skip to content
Snippets Groups Projects
Commit b7bfae1a authored by Kay Ousterhout's avatar Kay Ousterhout
Browse files

Correctly merged in maxTaskFailures fix

parent b8ae096a
No related branches found
No related tags found
No related merge requests found
......@@ -540,7 +540,7 @@ private[spark] class TaskSetManager(
if (numFailures(index) >= maxTaskFailures) {
logError("Task %s:%d failed %d times; aborting job".format(
taskSet.id, index, maxTaskFailures))
abort("Task %s:%d failed more than %d times (most recent failure: %s)".format(
abort("Task %s:%d failed %d times (most recent failure: %s)".format(
taskSet.id, index, maxTaskFailures, failureReason))
}
}
......
......@@ -42,7 +42,7 @@ class FailureSuite extends FunSuite with LocalSparkContext {
// Run a 3-task map job in which task 1 deterministically fails once, and check
// whether the job completes successfully and we ran 4 tasks in total.
test("failure in a single-stage job") {
sc = new SparkContext("local[1,1]", "test")
sc = new SparkContext("local[1,2]", "test")
val results = sc.makeRDD(1 to 3, 3).map { x =>
FailureSuiteState.synchronized {
FailureSuiteState.tasksRun += 1
......@@ -62,7 +62,7 @@ class FailureSuite extends FunSuite with LocalSparkContext {
// Run a map-reduce job in which a reduce task deterministically fails once.
test("failure in a two-stage job") {
sc = new SparkContext("local[1,1]", "test")
sc = new SparkContext("local[1,2]", "test")
val results = sc.makeRDD(1 to 3).map(x => (x, x)).groupByKey(3).map {
case (k, v) =>
FailureSuiteState.synchronized {
......
......@@ -53,7 +53,7 @@ class SparkContextSchedulerCreationSuite
test("local-n") {
val sched = createTaskScheduler("local[5]")
assert(sched.maxTaskFailures === 0)
assert(sched.maxTaskFailures === 1)
sched.backend match {
case s: LocalBackend => assert(s.totalCores === 5)
case _ => fail()
......
......@@ -89,7 +89,7 @@ class TaskResultGetterSuite extends FunSuite with BeforeAndAfter with BeforeAndA
test("task retried if result missing from block manager") {
// Set the maximum number of task failures to > 0, so that the task set isn't aborted
// after the result is missing.
sc = new SparkContext("local[1,1]", "test")
sc = new SparkContext("local[1,2]", "test")
// If this test hangs, it's probably because no resource offers were made after the task
// failed.
val scheduler: TaskSchedulerImpl = sc.taskScheduler match {
......
0% — Loading, or an error occurred.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment