Skip to content
Snippets Groups Projects
Commit 468af0fa authored by Reynold Xin's avatar Reynold Xin
Browse files

Merge pull request #348 from prabeesh/master

spark -> org.apache.spark

Changed package name spark to org.apache.spark which was missing in some of the files
parents c3cf0475 a91f14cf
No related branches found
No related tags found
No related merge requests found
Showing with 12 additions and 12 deletions
...@@ -134,9 +134,9 @@ object FeederActor { ...@@ -134,9 +134,9 @@ object FeederActor {
* <hostname> and <port> describe the AkkaSystem that Spark Sample feeder is running on. * <hostname> and <port> describe the AkkaSystem that Spark Sample feeder is running on.
* *
* To run this example locally, you may run Feeder Actor as * To run this example locally, you may run Feeder Actor as
* `$ ./bin/run-example spark.streaming.examples.FeederActor 127.0.1.1 9999` * `$ ./bin/run-example org.apache.spark.streaming.examples.FeederActor 127.0.1.1 9999`
* and then run the example * and then run the example
* `$ ./bin/run-example spark.streaming.examples.ActorWordCount local[2] 127.0.1.1 9999` * `$ ./bin/run-example org.apache.spark.streaming.examples.ActorWordCount local[2] 127.0.1.1 9999`
*/ */
object ActorWordCount { object ActorWordCount {
def main(args: Array[String]) { def main(args: Array[String]) {
......
...@@ -28,7 +28,7 @@ import org.apache.spark.streaming.StreamingContext._ ...@@ -28,7 +28,7 @@ import org.apache.spark.streaming.StreamingContext._
* <directory> is the directory that Spark Streaming will use to find and read new text files. * <directory> is the directory that Spark Streaming will use to find and read new text files.
* *
* To run this on your local machine on directory `localdir`, run this example * To run this on your local machine on directory `localdir`, run this example
* `$ ./bin/run-example spark.streaming.examples.HdfsWordCount local[2] localdir` * `$ ./bin/run-example org.apache.spark.streaming.examples.HdfsWordCount local[2] localdir`
* Then create a text file in `localdir` and the words in the file will get counted. * Then create a text file in `localdir` and the words in the file will get counted.
*/ */
object HdfsWordCount { object HdfsWordCount {
......
...@@ -35,7 +35,7 @@ import org.apache.spark.streaming.util.RawTextHelper._ ...@@ -35,7 +35,7 @@ import org.apache.spark.streaming.util.RawTextHelper._
* <numThreads> is the number of threads the kafka consumer should use * <numThreads> is the number of threads the kafka consumer should use
* *
* Example: * Example:
* `./bin/run-example spark.streaming.examples.KafkaWordCount local[2] zoo01,zoo02,zoo03 my-consumer-group topic1,topic2 1` * `./bin/run-example org.apache.spark.streaming.examples.KafkaWordCount local[2] zoo01,zoo02,zoo03 my-consumer-group topic1,topic2 1`
*/ */
object KafkaWordCount { object KafkaWordCount {
def main(args: Array[String]) { def main(args: Array[String]) {
......
...@@ -29,7 +29,7 @@ import org.apache.spark.streaming.StreamingContext._ ...@@ -29,7 +29,7 @@ import org.apache.spark.streaming.StreamingContext._
* To run this on your local machine, you need to first run a Netcat server * To run this on your local machine, you need to first run a Netcat server
* `$ nc -lk 9999` * `$ nc -lk 9999`
* and then run the example * and then run the example
* `$ ./bin/run-example spark.streaming.examples.NetworkWordCount local[2] localhost 9999` * `$ ./bin/run-example org.apache.spark.streaming.examples.NetworkWordCount local[2] localhost 9999`
*/ */
object NetworkWordCount { object NetworkWordCount {
def main(args: Array[String]) { def main(args: Array[String]) {
......
...@@ -29,7 +29,7 @@ import org.apache.spark.streaming.StreamingContext._ ...@@ -29,7 +29,7 @@ import org.apache.spark.streaming.StreamingContext._
* To run this on your local machine, you need to first run a Netcat server * To run this on your local machine, you need to first run a Netcat server
* `$ nc -lk 9999` * `$ nc -lk 9999`
* and then run the example * and then run the example
* `$ ./bin/run-example spark.streaming.examples.StatefulNetworkWordCount local[2] localhost 9999` * `$ ./bin/run-example org.apache.spark.streaming.examples.StatefulNetworkWordCount local[2] localhost 9999`
*/ */
object StatefulNetworkWordCount { object StatefulNetworkWordCount {
def main(args: Array[String]) { def main(args: Array[String]) {
......
...@@ -62,9 +62,9 @@ object SimpleZeroMQPublisher { ...@@ -62,9 +62,9 @@ object SimpleZeroMQPublisher {
* <zeroMQurl> and <topic> describe where zeroMq publisher is running. * <zeroMQurl> and <topic> describe where zeroMq publisher is running.
* *
* To run this example locally, you may run publisher as * To run this example locally, you may run publisher as
* `$ ./bin/run-example spark.streaming.examples.SimpleZeroMQPublisher tcp://127.0.1.1:1234 foo.bar` * `$ ./bin/run-example org.apache.spark.streaming.examples.SimpleZeroMQPublisher tcp://127.0.1.1:1234 foo.bar`
* and run the example as * and run the example as
* `$ ./bin/run-example spark.streaming.examples.ZeroMQWordCount local[2] tcp://127.0.1.1:1234 foo` * `$ ./bin/run-example org.apache.spark.streaming.examples.ZeroMQWordCount local[2] tcp://127.0.1.1:1234 foo`
*/ */
object ZeroMQWordCount { object ZeroMQWordCount {
def main(args: Array[String]) { def main(args: Array[String]) {
......
...@@ -39,8 +39,8 @@ object PageView extends Serializable { ...@@ -39,8 +39,8 @@ object PageView extends Serializable {
/** Generates streaming events to simulate page views on a website. /** Generates streaming events to simulate page views on a website.
* *
* This should be used in tandem with PageViewStream.scala. Example: * This should be used in tandem with PageViewStream.scala. Example:
* $ ./bin/run-example spark.streaming.examples.clickstream.PageViewGenerator 44444 10 * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewGenerator 44444 10
* $ ./bin/run-example spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444 * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444
* *
* When running this, you may want to set the root logging level to ERROR in * When running this, you may want to set the root logging level to ERROR in
* conf/log4j.properties to reduce the verbosity of the output. * conf/log4j.properties to reduce the verbosity of the output.
......
...@@ -25,8 +25,8 @@ import org.apache.spark.SparkContext._ ...@@ -25,8 +25,8 @@ import org.apache.spark.SparkContext._
* operators available in Spark streaming. * operators available in Spark streaming.
* *
* This should be used in tandem with PageViewStream.scala. Example: * This should be used in tandem with PageViewStream.scala. Example:
* $ ./bin/run-example spark.streaming.examples.clickstream.PageViewGenerator 44444 10 * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewGenerator 44444 10
* $ ./bin/run-example spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444 * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444
*/ */
object PageViewStream { object PageViewStream {
def main(args: Array[String]) { def main(args: Array[String]) {
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment