diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 7d7bf88b9eb1215126fcc869aa393c795c3d7f39..cd179cf32805f822cd67a69d6ab6f39e77882cb6 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -152,13 +152,13 @@ private[spark] class ApplicationMaster(
       val isLastAttempt = client.getAttemptId().getAttemptId() >= maxAppAttempts
 
       if (!finished) {
-        // This happens when the user application calls System.exit(). We have the choice
-        // of either failing or succeeding at this point. We report success to avoid
-        // retrying applications that have succeeded (System.exit(0)), which means that
-        // applications that explicitly exit with a non-zero status will also show up as
-        // succeeded in the RM UI.
+        // If the shutdown hook runs before the final status has been reported, the
+        // ApplicationMaster now defaults to a failed state; this differs from the 1.x behavior.
+        // When the user application exits early by calling System.exit(N), the application is
+        // marked as failed with EXIT_EARLY. For a clean shutdown, the user application should
+        // not call System.exit(0) to terminate itself.
         finish(finalStatus,
-          ApplicationMaster.EXIT_SUCCESS,
+          ApplicationMaster.EXIT_EARLY,
           "Shutdown hook called before final status was reported.")
       }
 
@@ -209,7 +209,7 @@ private[spark] class ApplicationMaster(
    */
   final def getDefaultFinalStatus(): FinalApplicationStatus = {
     if (isClusterMode) {
-      FinalApplicationStatus.SUCCEEDED
+      FinalApplicationStatus.FAILED
     } else {
       FinalApplicationStatus.UNDEFINED
     }
@@ -653,6 +653,7 @@ object ApplicationMaster extends Logging {
   private val EXIT_SC_NOT_INITED = 13
   private val EXIT_SECURITY = 14
   private val EXIT_EXCEPTION_USER_CLASS = 15
+  private val EXIT_EARLY = 16
 
   private var master: ApplicationMaster = _
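
To make the behavioral change concrete, below is a minimal standalone Scala sketch of the control flow after this patch. It is not part of the diff: the ShutdownHookSketch object and its FinalStatus type are hypothetical stand-ins for the real FinalApplicationStatus and finish() path, kept self-contained so it runs without a YARN cluster. It only illustrates that a user application which exits before the final status is reported now ends up FAILED with exit code EXIT_EARLY (16) instead of SUCCEEDED with exit code 0.

// Standalone sketch (not the real ApplicationMaster): models the shutdown-hook decision
// introduced by this diff. Names such as `finished` and `defaultFinalStatus` mirror the
// patch; everything else is simplified for illustration only.
object ShutdownHookSketch {
  // Mirrors the relevant exit codes from object ApplicationMaster.
  private val EXIT_SUCCESS = 0
  private val EXIT_EARLY = 16

  // Hypothetical stand-in for org.apache.hadoop.yarn.api.records.FinalApplicationStatus.
  sealed trait FinalStatus
  case object Succeeded extends FinalStatus
  case object Failed extends FinalStatus
  case object Undefined extends FinalStatus

  // After the patch, the default final status in cluster mode is FAILED, so an application
  // that dies before reporting a status is no longer shown as succeeded in the RM UI.
  def defaultFinalStatus(isClusterMode: Boolean): FinalStatus =
    if (isClusterMode) Failed else Undefined

  // Simplified version of the shutdown-hook body: if the final status was never reported
  // (e.g. the user application called System.exit(N) early), finish with EXIT_EARLY.
  // A finished application is shown here as Succeeded purely to keep the sketch small.
  def onShutdown(finished: Boolean, isClusterMode: Boolean): (FinalStatus, Int) =
    if (!finished) (defaultFinalStatus(isClusterMode), EXIT_EARLY)
    else (Succeeded, EXIT_SUCCESS)

  def main(args: Array[String]): Unit = {
    // A user application that called System.exit(0) before the AM reported a status
    // now surfaces as (Failed,16) instead of a successful exit.
    println(onShutdown(finished = false, isClusterMode = true)) // (Failed,16)
    println(onShutdown(finished = true, isClusterMode = true))  // (Succeeded,0)
  }
}

The key design point the sketch captures is that success must now be reported explicitly before the shutdown hook fires; the hook itself no longer assumes success on behalf of the user application.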