diff --git a/bin/beeline.cmd b/bin/beeline.cmd
index 8293f311029ddbd07ed8fa0291d538b1ccbf28b1..8ddaa419967a54796c8d2e1564f285cd1bee6b6d 100644
--- a/bin/beeline.cmd
+++ b/bin/beeline.cmd
@@ -18,4 +18,4 @@ rem limitations under the License.
 rem
 set SPARK_HOME=%~dp0..
 
-cmd /V /E /C %SPARK_HOME%\bin\spark-class.cmd org.apache.hive.beeline.BeeLine %*
+cmd /V /E /C "%SPARK_HOME%\bin\spark-class.cmd" org.apache.hive.beeline.BeeLine %*
diff --git a/bin/load-spark-env.cmd b/bin/load-spark-env.cmd
index 59080edd294f2d9a5affd540aaa083aa5eaf4479..0977025c2036e1ed81e948bb4088b450d40413c5 100644
--- a/bin/load-spark-env.cmd
+++ b/bin/load-spark-env.cmd
@@ -27,7 +27,7 @@ if [%SPARK_ENV_LOADED%] == [] (
   if not [%SPARK_CONF_DIR%] == [] (
     set user_conf_dir=%SPARK_CONF_DIR%
   ) else (
-    set user_conf_dir=%~dp0..\conf
+    set user_conf_dir=..\conf
   )
 
   call :LoadSparkEnv
@@ -35,8 +35,8 @@ if [%SPARK_ENV_LOADED%] == [] (
 
 rem Setting SPARK_SCALA_VERSION if not already set.
 
-set ASSEMBLY_DIR2=%SPARK_HOME%/assembly/target/scala-2.11
-set ASSEMBLY_DIR1=%SPARK_HOME%/assembly/target/scala-2.10
+set ASSEMBLY_DIR2="%SPARK_HOME%\assembly\target\scala-2.11"
+set ASSEMBLY_DIR1="%SPARK_HOME%\assembly\target\scala-2.10"
 
 if [%SPARK_SCALA_VERSION%] == [] (
 
diff --git a/bin/pyspark.cmd b/bin/pyspark.cmd
index 7c26fbbac28b8f0a17815a5ef1e3f60e8218d9bf..72d046a4ba2cfae7d0f45559afdbc88450721688 100644
--- a/bin/pyspark.cmd
+++ b/bin/pyspark.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running PySpark. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0pyspark2.cmd %*
+cmd /V /E /C "%~dp0pyspark2.cmd" %*
diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd
index 51d6d15f66c69c035c5cc6880fd552d6d1068f0a..21fe28155a5963ba2b1f1ab5cf5cbd4e22edf479 100644
--- a/bin/pyspark2.cmd
+++ b/bin/pyspark2.cmd
@@ -20,7 +20,7 @@ rem
 
 rem Figure out where the Spark framework is installed
 set SPARK_HOME=%~dp0..
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 set _SPARK_CMD_USAGE=Usage: bin\pyspark.cmd [options]
 
 rem Figure out which Python to use.
@@ -35,4 +35,4 @@ set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.9.1-src.zip;%PYTHONPATH%
 set OLD_PYTHONSTARTUP=%PYTHONSTARTUP%
 set PYTHONSTARTUP=%SPARK_HOME%\python\pyspark\shell.py
 
-call %SPARK_HOME%\bin\spark-submit2.cmd pyspark-shell-main --name "PySparkShell" %*
+call "%SPARK_HOME%\bin\spark-submit2.cmd" pyspark-shell-main --name "PySparkShell" %*
diff --git a/bin/run-example.cmd b/bin/run-example.cmd
index 5b2d048d6ed50a5da9e073ee5a2ef4b4a60fa1b9..64f6bc3728d07833238e50ac83eee5503f1b73eb 100644
--- a/bin/run-example.cmd
+++ b/bin/run-example.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running a Spark example. To avoid polluting
 rem the environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0run-example2.cmd %*
+cmd /V /E /C "%~dp0run-example2.cmd" %*
diff --git a/bin/run-example2.cmd b/bin/run-example2.cmd
index c3e0221fb62e3e481ed9a4efd92cac5e8d51fbe2..fada43581d1845b4e88617ee9ef2f5fed97a9e38 100644
--- a/bin/run-example2.cmd
+++ b/bin/run-example2.cmd
@@ -20,12 +20,9 @@ rem
 set SCALA_VERSION=2.10
 
 rem Figure out where the Spark framework is installed
-set FWDIR=%~dp0..\
+set SPARK_HOME=%~dp0..
 
-rem Export this as SPARK_HOME
-set SPARK_HOME=%FWDIR%
-
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 
 rem Test that an argument was given
 if not "x%1"=="x" goto arg_given
@@ -36,12 +33,12 @@ if not "x%1"=="x" goto arg_given
   goto exit
 :arg_given
 
-set EXAMPLES_DIR=%FWDIR%examples
+set EXAMPLES_DIR=%SPARK_HOME%\examples
 
 rem Figure out the JAR file that our examples were packaged into.
 set SPARK_EXAMPLES_JAR=
-if exist "%FWDIR%RELEASE" (
-  for %%d in ("%FWDIR%lib\spark-examples*.jar") do (
+if exist "%SPARK_HOME%\RELEASE" (
+  for %%d in ("%SPARK_HOME%\lib\spark-examples*.jar") do (
     set SPARK_EXAMPLES_JAR=%%d
   )
 ) else (
@@ -80,7 +77,7 @@ if "%~1" neq "" (
 )
 if defined ARGS set ARGS=%ARGS:~1%
 
-call "%FWDIR%bin\spark-submit.cmd" ^
+call "%SPARK_HOME%\bin\spark-submit.cmd" ^
   --master %EXAMPLE_MASTER% ^
   --class %EXAMPLE_CLASS% ^
   "%SPARK_EXAMPLES_JAR%" %ARGS%
diff --git a/bin/spark-class.cmd b/bin/spark-class.cmd
index 19850db9e1e5dd813d4fbdffbf24b427c154eb01..3bf3d20cb57b5e254a016863e81b5d443cc4afc3 100644
--- a/bin/spark-class.cmd
+++ b/bin/spark-class.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running a Spark class. To avoid polluting
 rem the environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0spark-class2.cmd %*
+cmd /V /E /C "%~dp0spark-class2.cmd" %*
diff --git a/bin/spark-class2.cmd b/bin/spark-class2.cmd
index db09fa27e51a62c842e431b5d77cfa609fb97e3c..c4fadb822323da184706e5f30ef232e26c93e30c 100644
--- a/bin/spark-class2.cmd
+++ b/bin/spark-class2.cmd
@@ -20,7 +20,7 @@ rem
 
 rem Figure out where the Spark framework is installed
 set SPARK_HOME=%~dp0..
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 
 rem Test that an argument was given
 if "x%1"=="x" (
@@ -32,9 +32,9 @@ rem Find assembly jar
 set SPARK_ASSEMBLY_JAR=0
 
 if exist "%SPARK_HOME%\RELEASE" (
-  set ASSEMBLY_DIR=%SPARK_HOME%\lib
+  set ASSEMBLY_DIR="%SPARK_HOME%\lib"
 ) else (
-  set ASSEMBLY_DIR=%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%
+  set ASSEMBLY_DIR="%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%"
 )
 
 for %%d in (%ASSEMBLY_DIR%\spark-assembly*hadoop*.jar) do (
@@ -50,7 +50,7 @@ set LAUNCH_CLASSPATH=%SPARK_ASSEMBLY_JAR%
 
 rem Add the launcher build dir to the classpath if requested.
 if not "x%SPARK_PREPEND_CLASSES%"=="x" (
-  set LAUNCH_CLASSPATH=%SPARK_HOME%\launcher\target\scala-%SPARK_SCALA_VERSION%\classes;%LAUNCH_CLASSPATH%
+  set LAUNCH_CLASSPATH="%SPARK_HOME%\launcher\target\scala-%SPARK_SCALA_VERSION%\classes;%LAUNCH_CLASSPATH%"
 )
 
 set _SPARK_ASSEMBLY=%SPARK_ASSEMBLY_JAR%
@@ -62,7 +62,7 @@ if not "x%JAVA_HOME%"=="x" set RUNNER=%JAVA_HOME%\bin\java
 rem The launcher library prints the command to be executed in a single line suitable for being
 rem executed by the batch interpreter. So read all the output of the launcher into a variable.
 set LAUNCHER_OUTPUT=%temp%\spark-class-launcher-output-%RANDOM%.txt
-"%RUNNER%" -cp %LAUNCH_CLASSPATH% org.apache.spark.launcher.Main %* > %LAUNCHER_OUTPUT%
+"%RUNNER%" -cp "%LAUNCH_CLASSPATH%" org.apache.spark.launcher.Main %* > %LAUNCHER_OUTPUT%
 for /f "tokens=*" %%i in (%LAUNCHER_OUTPUT%) do (
   set SPARK_CMD=%%i
 )
diff --git a/bin/spark-shell.cmd b/bin/spark-shell.cmd
index 8f90ba5a0b3b8dd7e656f1aaeb1b063ed44ffa5f..991423da6ab99cd219bde380ede1b508c337b7ba 100644
--- a/bin/spark-shell.cmd
+++ b/bin/spark-shell.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running Spark shell. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0spark-shell2.cmd %*
+cmd /V /E /C "%~dp0spark-shell2.cmd" %*
diff --git a/bin/spark-shell2.cmd b/bin/spark-shell2.cmd
index b9b0f510d7f5d0e731501b85d1d474a29b4676e7..7b5d396be888c11ea37e74f5001d67c94cb4c17a 100644
--- a/bin/spark-shell2.cmd
+++ b/bin/spark-shell2.cmd
@@ -32,4 +32,4 @@ if "x%SPARK_SUBMIT_OPTS%"=="x" (
 set SPARK_SUBMIT_OPTS="%SPARK_SUBMIT_OPTS% -Dscala.usejavacp=true"
 
 :run_shell
-%SPARK_HOME%\bin\spark-submit2.cmd --class org.apache.spark.repl.Main --name "Spark shell" %*
+"%SPARK_HOME%\bin\spark-submit2.cmd" --class org.apache.spark.repl.Main --name "Spark shell" %*
diff --git a/bin/spark-submit.cmd b/bin/spark-submit.cmd
index 8f3b84c7b971d9cd42f48879a1787ba3cf67c84e..f121b62a53d2486df1a6bf78212157a422ec67f6 100644
--- a/bin/spark-submit.cmd
+++ b/bin/spark-submit.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running Spark submit. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0spark-submit2.cmd %*
+cmd /V /E /C "%~dp0spark-submit2.cmd" %*
diff --git a/bin/spark-submit2.cmd b/bin/spark-submit2.cmd
index 651376e526928b48567edd412b21e53b60f06e49..49e350fa5c416740ef4954ff3104e74977824600 100644
--- a/bin/spark-submit2.cmd
+++ b/bin/spark-submit2.cmd
@@ -24,4 +24,4 @@ rem disable randomized hash for string in Python 3.3+
 set PYTHONHASHSEED=0
 
 set CLASS=org.apache.spark.deploy.SparkSubmit
-%~dp0spark-class2.cmd %CLASS% %*
+"%~dp0spark-class2.cmd" %CLASS% %*
diff --git a/bin/sparkR.cmd b/bin/sparkR.cmd
index d7b60183ca8e0651a74884f3c5713a3f15a8d5b0..1e5ea6a6232192a2d75e9b09d7d60f5f72ec85a4 100644
--- a/bin/sparkR.cmd
+++ b/bin/sparkR.cmd
@@ -20,4 +20,4 @@ rem
 rem This is the entry point for running SparkR. To avoid polluting the
 rem environment, it just launches a new cmd to do the real work.
 
-cmd /V /E /C %~dp0sparkR2.cmd %*
+cmd /V /E /C "%~dp0sparkR2.cmd" %*
diff --git a/bin/sparkR2.cmd b/bin/sparkR2.cmd
index e47f22c7300bb7dd2af3dfd7d0dba126823bb991..459b780e2ae3361b8f74dc27673d43884ceeed73 100644
--- a/bin/sparkR2.cmd
+++ b/bin/sparkR2.cmd
@@ -20,7 +20,7 @@ rem
 
 rem Figure out where the Spark framework is installed
 set SPARK_HOME=%~dp0..
 
-call %SPARK_HOME%\bin\load-spark-env.cmd
+call "%SPARK_HOME%\bin\load-spark-env.cmd"
 
-call %SPARK_HOME%\bin\spark-submit2.cmd sparkr-shell-main %*
+call "%SPARK_HOME%\bin\spark-submit2.cmd" sparkr-shell-main %*
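
A minimal illustration of the failure these quotes guard against (a sketch, assuming a hypothetical Spark install under a path with spaces such as C:\Program Files\spark):

rem Unquoted, cmd.exe splits the script path at the first space and reports
rem that 'C:\Program' is not recognized as an internal or external command.
cmd /V /E /C C:\Program Files\spark\bin\spark-shell2.cmd

rem Quoted, the whole script path is passed through as a single token.
cmd /V /E /C "C:\Program Files\spark\bin\spark-shell2.cmd"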