diff --git a/docs/spark-standalone.md b/docs/spark-standalone.md
index ec5ae5b4f7c10ed75a099f6b4a529244ddcf339f..baa0a062f704c773fd186c1822e84aa125cb3cef 100644
--- a/docs/spark-standalone.md
+++ b/docs/spark-standalone.md
@@ -20,7 +20,7 @@ then modify `conf/spark-env.sh` in the `dist/` directory before deploying to all
 
 You can start a standalone master server by executing:
 
-    ./bin/start-master.sh
+    ./sbin/start-master.sh
 
 Once started, the master will print out a `spark://HOST:PORT` URL for itself, which you can use to connect workers to it,
 or pass as the "master" argument to `SparkContext`. You can also find this URL on
diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index a2b0e7e7f47480f2bfd42fbc4951067a161c82d1..d82a1e1490cc0e7125030a7b93a10bfd1ddca208 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -436,7 +436,7 @@ def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
 def setup_standalone_cluster(master, slave_nodes, opts):
   slave_ips = '\n'.join([i.public_dns_name for i in slave_nodes])
   ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % (slave_ips))
-  ssh(master, opts, "/root/spark/bin/start-all.sh")
+  ssh(master, opts, "/root/spark/sbin/start-all.sh")
 
 def setup_spark_cluster(master, opts):
   ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
diff --git a/make-distribution.sh b/make-distribution.sh
index 8765c7e620506ab29868c4b7f5c8d3b31f58a9fd..0463d14762e793560e1d763f6658f0cd956725d8 100755
--- a/make-distribution.sh
+++ b/make-distribution.sh
@@ -31,9 +31,9 @@
 #
 # Recommended deploy/testing procedure (standalone mode):
 # 1) Rsync / deploy the dist/ dir to one host
-# 2) cd to deploy dir; ./bin/start-master.sh
+# 2) cd to deploy dir; ./sbin/start-master.sh
 # 3) Verify master is up by visiting web page, ie http://master-ip:8080. Note the spark:// URL.
-# 4) ./bin/start-slave.sh 1 <<spark:// URL>>
+# 4) ./sbin/start-slave.sh 1 <<spark:// URL>>
 # 5) MASTER="spark://my-master-ip:7077" ./bin/spark-shell
 #
 
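
As a quick way to exercise the renamed scripts, the snippet below replays the deploy/testing procedure from the make-distribution.sh header with the new sbin/ paths. It is only a sketch: my-master-ip is a placeholder, and the start-slave.sh arguments are carried over verbatim from step 4 of that procedure rather than re-derived.

    # rsync the dist/ directory to a host, then from that deploy directory:
    ./sbin/start-master.sh
    # confirm the master is up at http://master-ip:8080 and note its spark:// URL

    # attach a worker, using the arguments shown in step 4 of the procedure
    ./sbin/start-slave.sh 1 spark://my-master-ip:7077

    # spark-shell itself still lives under bin/; only the daemon scripts moved to sbin/
    MASTER="spark://my-master-ip:7077" ./bin/spark-shell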