From ce18b50d5ff37dc5c558d0602321a61887dd8b48 Mon Sep 17 00:00:00 2001
From: BlackNiuza <shiyun.wxm@taobao.com>
Date: Wed, 10 Jul 2013 19:11:43 +0800
Subject: [PATCH] set SUCCEEDED for all masters in shutdown hook

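The comment removed below had left the hook's final status (KILLED rather
than FAILED) open for discussion; this patch resolves it by reporting
SUCCEEDED for every registered master, since job scripts such as SparkPi and
SparkLR end with an explicit "System.exit(0)" and never reach the normal
"successed = true" path.

Not part of the patch, only a minimal standalone sketch (the object name and
the println calls are illustrative, not the real ApplicationMaster API)
showing that a JVM shutdown hook still runs after user code calls
System.exit(0), which is why the final status can be reported from there:

    object ShutdownHookSketch {
      def main(args: Array[String]): Unit = {
        // Stand-in for master.finishApplicationMaster(FinalApplicationStatus.SUCCEEDED)
        Runtime.getRuntime.addShutdownHook(new Thread() {
          override def run(): Unit =
            println("shutdown hook: reporting final status SUCCEEDED")
        })
        println("user code running ...")
        // Like SparkPi/SparkLR, the user program exits explicitly; the code after
        // mainMethod.invoke() is never reached, but the hook above still fires.
        System.exit(0)
      }
    }

Reporting SUCCEEDED from the hook is therefore the only chance the
ApplicationMaster gets when the user program exits this way; KILLED would
misreport a normal exit.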
---
 .../scala/spark/deploy/yarn/ApplicationMaster.scala       | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/core/src/hadoop2-yarn/scala/spark/deploy/yarn/ApplicationMaster.scala b/core/src/hadoop2-yarn/scala/spark/deploy/yarn/ApplicationMaster.scala
index 776db201f9..68bb36d316 100644
--- a/core/src/hadoop2-yarn/scala/spark/deploy/yarn/ApplicationMaster.scala
+++ b/core/src/hadoop2-yarn/scala/spark/deploy/yarn/ApplicationMaster.scala
@@ -135,6 +135,9 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration) e
           var mainArgs: Array[String] = new Array[String](args.userArgs.size())
           args.userArgs.copyToArray(mainArgs, 0, args.userArgs.size())
           mainMethod.invoke(null, mainArgs)
+          // Some user programs end with "System.exit(0)", for example SparkPi and SparkLR.
+          // In that case userThread stops here (unless an uncaught exception is thrown),
+          // so the shutdown hook has to set the final status to SUCCEEDED.
           successed = true
         } finally {
           if(successed){
@@ -308,11 +311,8 @@ object ApplicationMaster {
           logInfo("Invoking sc stop from shutdown hook") 
           sc.stop() 
           // best case ...
-          // due to the sparkContext is stopped and ApplicationMaster is down,
-          // the status of registered masters should be set KILLED better than FAILED.
-          // need discussion
           for (master <- applicationMasters) {
-            master.finishApplicationMaster(FinalApplicationStatus.KILLED)
+            master.finishApplicationMaster(FinalApplicationStatus.SUCCEEDED)
           }
         } 
       } )
-- 
GitLab