Skip to content
Snippets Groups Projects
Commit 80127935 authored by Jakob Odersky's avatar Jakob Odersky Committed by Davies Liu
Browse files

[SPARK-10001] [CORE] Interrupt tasks in repl with Ctrl+C

## What changes were proposed in this pull request?

Improve signal handling to allow interrupting running tasks from the REPL (with Ctrl+C).
If no tasks are running or Ctrl+C is pressed twice, the signal is forwarded to the default handler resulting in the usual termination of the application.

This PR is a rewrite of -- and therefore closes #8216 -- as per piaozhexiu's request

## How was this patch tested?
Signal handling is not easily testable therefore no unit tests were added. Nevertheless, the new functionality is implemented in a best-effort approach, soft-failing in case signals aren't available on a specific OS.

Author: Jakob Odersky <jakob@odersky.com>

Closes #12557 from jodersky/SPARK-10001-sigint.
parent 3405cc77
No related branches found
No related tags found
No related merge requests found
......@@ -17,44 +17,20 @@
package org.apache.spark.util
import org.apache.commons.lang3.SystemUtils
import org.slf4j.Logger
import sun.misc.{Signal, SignalHandler}
/**
* Used to log signals received. This can be very useful in debugging crashes or kills.
*
* Inspired by Colin Patrick McCabe's similar class from Hadoop.
*/
private[spark] object SignalLogger {

  /**
   * Register a handler that logs every received TERM, HUP or INT signal.
   *
   * Registration is delegated to [[Signaling]], which installs handlers on a
   * best-effort basis (it is a no-op on operating systems where posix signals
   * are unavailable). The action always returns `false`, meaning the signal is
   * never consumed here: after logging, it is escalated to the previously
   * installed handler, preserving the default behavior (normally terminating
   * the process).
   *
   * @param log logger used to record each received signal
   */
  def register(log: Logger): Unit = Seq("TERM", "HUP", "INT").foreach { sig =>
    Signaling.register(sig) {
      log.error("RECEIVED SIGNAL " + sig)
      false
    }
  }
}
/**
 * Signal handler that logs each received signal and then delegates to the
 * handler that was installed before it, so the default behavior (e.g. process
 * termination) is preserved.
 *
 * NOTE(review): `Signal.handle` is invoked with `this` during construction,
 * i.e. `this` escapes before the instance is fully built — confirm no further
 * initialization depends on it.
 */
private sealed class SignalLoggerHandler(name: String, log: Logger) extends SignalHandler {

  // Install this handler for the named signal and keep the handler it replaced.
  val prevHandler = Signal.handle(new Signal(name), this)

  override def handle(signal: Signal): Unit = {
    // Build the message first, then log and escalate to the original handler.
    val description = "RECEIVED SIGNAL " + signal.getNumber() + ": SIG" + signal.getName()
    log.error(description)
    prevHandler.handle(signal)
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.{Collections, LinkedList}

import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.commons.lang3.SystemUtils
import sun.misc.{Signal, SignalHandler}

import org.apache.spark.internal.Logging
/**
 * Contains utilities for working with posix signals.
 *
 * Signals are only supported on UNIX-like operating systems; on other systems
 * every registration is silently skipped (best effort).
 */
private[spark] object Signaling extends Logging {

  /**
   * A handler for a given signal that runs a collection of registered actions.
   *
   * Creating an instance installs it as the active handler for `signal` and
   * remembers the previously installed handler so unclaimed signals can be
   * escalated to it.
   */
  private class ActionHandler(signal: Signal) extends SignalHandler {

    // Actions to run when the signal arrives. A synchronized list because
    // registration and signal delivery may happen on different threads.
    private val actions = Collections.synchronizedList(new LinkedList[() => Boolean])

    // Original signal handler, before this handler was attached.
    private val prevHandler: SignalHandler = Signal.handle(signal, this)

    /**
     * Called when this handler's signal is received. Note that if the same signal is received
     * before this method returns, it is escalated to the previous handler.
     */
    override def handle(sig: Signal): Unit = {
      // Temporarily restore the old handler: a signal arriving while this one
      // is still running is escalated instead of re-entering this handler.
      Signal.handle(signal, prevHandler)

      // Run every action; escalate only if no action claims the signal
      // (i.e. none of them returns true).
      val escalate = actions.asScala.forall { action => !action() }
      if (escalate) {
        prevHandler.handle(sig)
      }

      // Re-register this handler for subsequent signals.
      Signal.handle(signal, this)
    }

    /**
     * Add an action to be run by this handler.
     * @param action An action to be run when a signal is received. Return true if the signal
     * should be stopped with this handler, false if it should be escalated.
     */
    def register(action: => Boolean): Unit = actions.add(() => action)
  }

  // Association of signal names to their respective handlers.
  private val handlers = new HashMap[String, ActionHandler]

  /**
   * Adds an action to be run when a given signal is received by this process.
   *
   * Note that signals are only supported on unix-like operating systems and work on a best-effort
   * basis: if a signal is not available or cannot be intercepted, only a warning is emitted.
   *
   * All actions for a given signal are run in a separate thread.
   *
   * @param signal name of the posix signal (e.g. "INT", "TERM")
   * @param action evaluated on each delivery of the signal; return true to stop
   *               the signal here, false to escalate it to the previous handler
   */
  def register(signal: String)(action: => Boolean): Unit = synchronized {
    if (SystemUtils.IS_OS_UNIX) {
      try {
        // Lazily install one ActionHandler per signal name; subsequent
        // registrations for the same signal share the handler.
        val handler = handlers.getOrElseUpdate(signal, {
          val h = new ActionHandler(new Signal(signal))
          logInfo("Registered signal handler for " + signal)
          h
        })
        handler.register(action)
      } catch {
        // Best effort: the signal may be unavailable on this platform or
        // already reserved by the JVM. NonFatal lets truly fatal errors
        // (OOM, LinkageError, ...) propagate.
        case NonFatal(ex) => logWarning(s"Failed to register signal handler for $signal", ex)
      }
    }
  }
}
......@@ -1022,6 +1022,7 @@ class SparkILoop(
}
sparkContext = new SparkContext(conf)
logInfo("Created spark context..")
Signaling.cancelOnInterrupt(sparkContext)
sparkContext
}
......
......@@ -88,6 +88,7 @@ object Main extends Logging {
}
sparkContext = new SparkContext(conf)
logInfo("Created spark context..")
Signaling.cancelOnInterrupt(sparkContext)
sparkContext
}
......
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.repl
import org.apache.spark.SparkContext
import org.apache.spark.internal.Logging
import org.apache.spark.util.{Signaling => USignaling}
private[repl] object Signaling extends Logging {

  /**
   * Register a SIGINT handler that cancels all active Spark jobs when Ctrl+C is
   * pressed. If no jobs are currently running, the signal is escalated to the
   * previously installed handler, which typically terminates the application.
   * This makes it possible to interrupt a running shell job by pressing Ctrl+C.
   */
  def cancelOnInterrupt(ctx: SparkContext): Unit = USignaling.register("INT") {
    val jobsRunning = !ctx.statusTracker.getActiveJobIds().isEmpty
    if (jobsRunning) {
      logWarning("Cancelling all active jobs, this can take a while. " +
        "Press Ctrl+C again to exit now.")
      ctx.cancelAllJobs()
    }
    // true  -> the signal was handled here (jobs were cancelled)
    // false -> escalate to the previous handler (exit the application)
    jobsRunning
  }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment