Commit 0bfacd5c
[SPARK-6090][MLLIB] add a basic BinaryClassificationMetrics to PySpark/MLlib
Xiangrui Meng authored
    A simple wrapper around the Scala implementation. `DataFrame` is used for serialization/deserialization. Methods that return `RDD`s are not supported in this PR.
    
davies: If we recognize Scala's `Product`s in Py4J, we can easily add wrappers for Scala methods that return `RDD[(Double, Double)]`. Is it easy to register a serializer for `Product` in PySpark?
    
    Author: Xiangrui Meng <meng@databricks.com>
    
    Closes #4863 from mengxr/SPARK-6090 and squashes the following commits:
    
    009a3a3 [Xiangrui Meng] provide schema
    dcddab5 [Xiangrui Meng] add a basic BinaryClassificationMetrics to PySpark/MLlib
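
For context, a minimal usage sketch of the wrapper this commit adds; the (score, label) values below are illustrative only:

```python
# Minimal sketch of the new PySpark wrapper; values are illustrative only.
from pyspark import SparkContext
from pyspark.mllib.evaluation import BinaryClassificationMetrics

sc = SparkContext(appName="BinaryClassificationMetricsSketch")

# An RDD of (score, label) pairs; the wrapper serializes it through a
# DataFrame before calling into the Scala implementation.
score_and_labels = sc.parallelize([
    (0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0),
    (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)
])

metrics = BinaryClassificationMetrics(score_and_labels)
print(metrics.areaUnderROC)  # area under the ROC curve
print(metrics.areaUnderPR)   # area under the precision-recall curve

metrics.unpersist()  # release the intermediate RDDs cached on the Scala side
sc.stop()
```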
run-tests 3.97 KiB
#!/usr/bin/env bash

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# Figure out where the Spark framework is installed
FWDIR="$(cd "$(dirname "$0")/.."; pwd)"

# CD into the python directory to find things on the right path
cd "$FWDIR/python"

FAILED=0
LOG_FILE=unit-tests.log

rm -f $LOG_FILE

# Remove the metastore and warehouse directory created by the HiveContext tests in Spark SQL
rm -rf metastore warehouse

function run_test() {
    echo "Running test: $1" | tee -a $LOG_FILE

    SPARK_TESTING=1 time "$FWDIR"/bin/pyspark "$1" > $LOG_FILE 2>&1

    # Record a failure if pyspark exited with a non-zero status.
    FAILED=$((PIPESTATUS[0]||$FAILED))

    # Fail and exit on the first test failure.
    if [[ $FAILED != 0 ]]; then
        grep -v "^[0-9][0-9]*" $LOG_FILE  # filter all lines starting with a number.
        echo -en "\033[31m"  # Red
        echo "Had test failures; see logs."
        echo -en "\033[0m"  # No color
        exit 1
    fi
}

function run_core_tests() {
    echo "Run core tests ..."
    run_test "pyspark/rdd.py"
    run_test "pyspark/context.py"
    run_test "pyspark/conf.py"
    PYSPARK_DOC_TEST=1 run_test "pyspark/broadcast.py"
    PYSPARK_DOC_TEST=1 run_test "pyspark/accumulators.py"
    run_test "pyspark/serializers.py"
    run_test "pyspark/profiler.py" 
    run_test "pyspark/shuffle.py"
    run_test "pyspark/tests.py"
}

function run_sql_tests() {
    echo "Run sql tests ..."
    run_test "pyspark/sql/types.py"
    run_test "pyspark/sql/context.py"
    run_test "pyspark/sql/dataframe.py"
    run_test "pyspark/sql/functions.py"
    run_test "pyspark/sql/tests.py"
}

function run_mllib_tests() {
    echo "Run mllib tests ..."
    run_test "pyspark/mllib/classification.py"
    run_test "pyspark/mllib/clustering.py"
    run_test "pyspark/mllib/evaluation.py"
    run_test "pyspark/mllib/feature.py"
    run_test "pyspark/mllib/linalg.py"
    run_test "pyspark/mllib/rand.py"
    run_test "pyspark/mllib/recommendation.py"
    run_test "pyspark/mllib/regression.py"
    run_test "pyspark/mllib/stat/_statistics.py"
    run_test "pyspark/mllib/tree.py"
    run_test "pyspark/mllib/util.py"
    run_test "pyspark/mllib/tests.py"
}

function run_ml_tests() {
    echo "Run ml tests ..."
    run_test "pyspark/ml/feature.py"
    run_test "pyspark/ml/classification.py"
    run_test "pyspark/ml/tests.py"
}

function run_streaming_tests() {
    echo "Run streaming tests ..."
    run_test "pyspark/streaming/util.py"
    run_test "pyspark/streaming/tests.py"
}

echo "Running PySpark tests. Output is in python/$LOG_FILE."

export PYSPARK_PYTHON="python"

# Try to test with Python 2.6, since that's the minimum version that we support:
if [ -n "$(which python2.6)" ]; then
    export PYSPARK_PYTHON="python2.6"
fi

echo "Testing with Python version:"
$PYSPARK_PYTHON --version

run_core_tests
run_sql_tests
run_mllib_tests
run_ml_tests
run_streaming_tests

# Try to test with PyPy
if [ -n "$(which pypy)" ]; then
    export PYSPARK_PYTHON="pypy"
    echo "Testing with PyPy version:"
    $PYSPARK_PYTHON --version

    run_core_tests
    run_sql_tests
    run_streaming_tests
fi

if [[ $FAILED == 0 ]]; then
    echo -en "\033[32m"  # Green
    echo "Tests passed."
    echo -en "\033[0m"  # No color
fi

# TODO: in the long run, it would be nice to use a test runner like `nose`.
# The doctest fixtures are the current barrier to doing this.
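
For reference, the doctest fixtures mentioned in the TODO are the per-module test entry points that run when a file such as `pyspark/rdd.py` is passed to `bin/pyspark` by `run_test`. A simplified sketch of that pattern follows; it is illustrative only, and the real modules also build a `SparkContext` and expose it to the doctests through the `globs` argument:

```python
# Simplified sketch of the doctest entry-point pattern used by the PySpark
# modules listed in this script (illustrative only; real modules also set up
# a SparkContext for the doctests).
import doctest
import sys


def _test():
    (failure_count, _) = doctest.testmod(optionflags=doctest.ELLIPSIS)
    if failure_count:
        sys.exit(1)  # a non-zero exit is what run_test records as a failure


if __name__ == "__main__":
    _test()
```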