diff --git a/core/src/main/scala/spark/deploy/worker/ExecutorRunner.scala b/core/src/main/scala/spark/deploy/worker/ExecutorRunner.scala
index 7423eca11e03b29294860fdc57db9d007b89b38f..a5ea4fe839215867bf98dcc8e4a3d50f3d07c7f4 100644
--- a/core/src/main/scala/spark/deploy/worker/ExecutorRunner.scala
+++ b/core/src/main/scala/spark/deploy/worker/ExecutorRunner.scala
@@ -169,10 +169,10 @@ private[spark] class ExecutorRunner(
 
       // Redirect its stdout and stderr to files
       val stdout = new File(executorDir, "stdout")
-      Files.write(header, stdout, Charsets.UTF_8)
       redirectStream(process.getInputStream, stdout)
 
       val stderr = new File(executorDir, "stderr")
+      Files.write(header, stderr, Charsets.UTF_8)
       redirectStream(process.getErrorStream, stderr)
 
      // Wait for it to exit; this is actually a bad thing if it happens, because we expect to run
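
The effect of the patch is that the executor command header is now written to the top of the stderr log instead of the stdout log, so stdout contains only the process's own output. Below is a minimal, self-contained sketch of that pattern, not the project's code: `redirectStream` here is a hypothetical stand-in for the class's own helper, and the `Files`/`Charsets` calls assume the Guava classes the diff appears to import.

```scala
import java.io.{File, FileOutputStream, InputStream}
import com.google.common.base.Charsets
import com.google.common.io.{ByteStreams, Files}

object RedirectSketch {
  // Hypothetical stand-in for ExecutorRunner.redirectStream: copy a process
  // stream into a log file on a background thread. The stream is opened in
  // append mode so a header already written to the file is preserved.
  def redirectStream(in: InputStream, file: File): Thread = {
    val out = new FileOutputStream(file, true) // append = true keeps any existing header
    val copier = new Thread("redirect-" + file.getName) {
      override def run(): Unit =
        try ByteStreams.copy(in, out) finally out.close()
    }
    copier.start()
    copier
  }

  def main(args: Array[String]): Unit = {
    val executorDir = Files.createTempDir()
    val header = "Spark Executor Command: echo hello\n" + ("=" * 40) + "\n\n"

    val process = new ProcessBuilder("echo", "hello").start()

    // After this patch: the stdout file holds only the process's own output ...
    val stdout = new File(executorDir, "stdout")
    val outCopier = redirectStream(process.getInputStream, stdout)

    // ... while the command header is prepended to the stderr log.
    val stderr = new File(executorDir, "stderr")
    Files.write(header, stderr, Charsets.UTF_8)
    val errCopier = redirectStream(process.getErrorStream, stderr)

    process.waitFor()
    outCopier.join()
    errCopier.join()
    println(Files.toString(stderr, Charsets.UTF_8)) // header followed by any stderr output
  }
}
```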