Commit cfdcef70 authored by Jacek Laskowski, committed by Reynold Xin

[STREAMING][MINOR] Scaladoc + logs

Found while doing code review

Author: Jacek Laskowski <jacek@japila.pl>

Closes #10878 from jaceklaskowski/streaming-scaladoc-logs-tiny-fixes.
parent 423783a0
@@ -30,9 +30,8 @@ import org.apache.spark.util.ClosureCleaner
  * `mapWithState` operation of a
  * [[org.apache.spark.streaming.dstream.PairDStreamFunctions pair DStream]] (Scala) or a
  * [[org.apache.spark.streaming.api.java.JavaPairDStream JavaPairDStream]] (Java).
- * Use the [[org.apache.spark.streaming.StateSpec StateSpec.apply()]] or
- * [[org.apache.spark.streaming.StateSpec StateSpec.create()]] to create instances of
- * this class.
+ * Use [[org.apache.spark.streaming.StateSpec.function() StateSpec.function]] factory methods
+ * to create instances of this class.
  *
  * Example in Scala:
  * {{{
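For context on the corrected Scaladoc, here is a minimal sketch (not part of this commit) of how a `StateSpec` is built with the `StateSpec.function` factory: the mapping function receives a key, an optional new value, and the running `State`, and returns a mapped record. The `wordCounts` stream name is made up for illustration.

import org.apache.spark.streaming.{State, StateSpec}

// Running per-key sum: fold each new value into the state and emit (key, sum).
val spec = StateSpec.function { (key: String, value: Option[Int], state: State[Int]) =>
  val sum = value.getOrElse(0) + state.getOption.getOrElse(0)
  state.update(sum)
  (key, sum)
}

// `wordCounts` is a hypothetical DStream[(String, Int)]:
// val stateful = wordCounts.mapWithState(spec)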
@@ -90,7 +90,7 @@ private[streaming] class MapWithStateDStreamImpl[
 }
 /**
- * A DStream that allows per-key state to be maintains, and arbitrary records to be generated
+ * A DStream that allows per-key state to be maintained, and arbitrary records to be generated
  * based on updates to the state. This is the main DStream that implements the `mapWithState`
  * operation on DStreams.
  *
@@ -166,7 +166,7 @@ private[streaming] class ReceivedBlockTracker(
   def cleanupOldBatches(cleanupThreshTime: Time, waitForCompletion: Boolean): Unit = synchronized {
     require(cleanupThreshTime.milliseconds < clock.getTimeMillis())
     val timesToCleanup = timeToAllocatedBlocks.keys.filter { _ < cleanupThreshTime }.toSeq
-    logInfo("Deleting batches " + timesToCleanup)
+    logInfo(s"Deleting batches: ${timesToCleanup.mkString(" ")}")
     if (writeToLog(BatchCleanupEvent(timesToCleanup))) {
       timeToAllocatedBlocks --= timesToCleanup
       writeAheadLogOption.foreach(_.clean(cleanupThreshTime.milliseconds, waitForCompletion))
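The log change is worth a note: concatenating a Seq calls its default `toString`, which prints the collection wrapper, whereas `mkString` prints only the elements. A standalone sketch, with plain strings standing in for the actual `Time` values:

val timesToCleanup = Seq("1000 ms", "2000 ms")

// Old style: Seq#toString wraps the elements in the collection name.
println("Deleting batches " + timesToCleanup)
// => Deleting batches List(1000 ms, 2000 ms)

// New style: interpolation plus mkString prints just the elements.
println(s"Deleting batches: ${timesToCleanup.mkString(" ")}")
// => Deleting batches: 1000 ms 2000 ms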
@@ -91,7 +91,7 @@ private[streaming] class StreamingJobProgressListener(ssc: StreamingContext)
   override def onBatchStarted(batchStarted: StreamingListenerBatchStarted): Unit = synchronized {
     val batchUIData = BatchUIData(batchStarted.batchInfo)
-    runningBatchUIData(batchStarted.batchInfo.batchTime) = BatchUIData(batchStarted.batchInfo)
+    runningBatchUIData(batchStarted.batchInfo.batchTime) = batchUIData
     waitingBatchUIData.remove(batchStarted.batchInfo.batchTime)
     totalReceivedRecords += batchUIData.numRecords
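The listener fix is a small reuse-instead-of-rebuild cleanup: the method already computes `batchUIData`, so the old code allocated a second, identical object. A minimal sketch of the pattern, using hypothetical stand-in types rather than Spark's actual classes:

import scala.collection.mutable

// Hypothetical stand-ins for the listener's types.
case class BatchInfo(batchTime: Long)
case class BatchUIData(info: BatchInfo)

val runningBatchUIData = mutable.Map.empty[Long, BatchUIData]

def onBatchStarted(info: BatchInfo): Unit = {
  val batchUIData = BatchUIData(info)
  // Reuse the value computed above instead of constructing it again.
  runningBatchUIData(info.batchTime) = batchUIData
}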