diff --git a/docs/streaming-kafka-integration.md b/docs/streaming-kafka-integration.md
index 0f1e32212eb401f0d647285d427ed322dda584ea..e0d3f4f69be8fa83753771f4a6f37b83dcd723da 100644
--- a/docs/streaming-kafka-integration.md
+++ b/docs/streaming-kafka-integration.md
@@ -111,7 +111,7 @@ Next, we discuss how to use this approach in your streaming application.
 	<div data-lang="java" markdown="1">
 		import org.apache.spark.streaming.kafka.*;
 
-		JavaPairReceiverInputDStream<String, String> directKafkaStream =
+		JavaPairInputDStream<String, String> directKafkaStream =
 			KafkaUtils.createDirectStream(streamingContext,
 				[key class], [value class], [key decoder class], [value decoder class],
 				[map of Kafka parameters], [set of topics to consume]);
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index d7eafff38f35ba23681d471e903dab45db839c6b..6550fcc0521c36680c762334a31bd4c48dd6e4a6 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -145,8 +145,8 @@ import org.apache.spark.streaming.api.java.*;
 import scala.Tuple2;
 
 // Create a local StreamingContext with two working thread and batch interval of 1 second
-SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
-JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1))
+SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount");
+JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));
 {% endhighlight %}
 
 Using this context, we can create a DStream that represents streaming data from a TCP
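
The first hunk corrects the return type: the receiver-less KafkaUtils.createDirectStream
returns a JavaPairInputDStream, while JavaPairReceiverInputDStream is what the
receiver-based createStream returns. Below is a minimal sketch of the corrected call
assembled against the Spark 1.3-era Kafka 0.8 integration; the broker address
("localhost:9092") and topic name ("test") are placeholder assumptions, not values
from the patched docs:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import kafka.serializer.StringDecoder;

    import org.apache.spark.SparkConf;
    import org.apache.spark.streaming.Durations;
    import org.apache.spark.streaming.api.java.JavaPairInputDStream;
    import org.apache.spark.streaming.api.java.JavaStreamingContext;
    import org.apache.spark.streaming.kafka.KafkaUtils;

    public class DirectKafkaExample {
      public static void main(String[] args) throws Exception {
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("DirectKafkaExample");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

        // Placeholder broker list; replace with your Kafka brokers.
        Map<String, String> kafkaParams = new HashMap<String, String>();
        kafkaParams.put("metadata.broker.list", "localhost:9092");

        // Placeholder topic; replace with the topics to consume.
        Set<String> topics = new HashSet<String>();
        topics.add("test");

        // Note the return type: JavaPairInputDStream, not JavaPairReceiverInputDStream,
        // since the direct approach uses no receiver.
        JavaPairInputDStream<String, String> directKafkaStream =
            KafkaUtils.createDirectStream(jssc,
                String.class, String.class,
                StringDecoder.class, StringDecoder.class,
                kafkaParams, topics);

        directKafkaStream.print();
        jssc.start();
        jssc.awaitTermination();
      }
    }

The second hunk only adds the semicolons Java requires at the end of the two
statements, as already shown on the SparkConf and JavaStreamingContext lines above.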