From 230bbeaa614ed0ee87ecceece42355dd9a4bacb3 Mon Sep 17 00:00:00 2001
From: JeremyNixon <jnixon2@gmail.com>
Date: Tue, 23 Feb 2016 15:57:29 -0800
Subject: [PATCH] [SPARK-10759][ML] update cross validator with include_example

This pull request uses `{% include_example %}` to add a Python CrossValidator example to the model selection section of ml-guide.

Author: JeremyNixon <jnixon2@gmail.com>

Closes #11240 from JeremyNixon/pipeline_include_example.
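
For reference, below is a condensed sketch of the pattern the included example demonstrates. The toy data and parameter values are illustrative, mirroring examples/src/main/python/ml/cross_validator.py; this is a sketch, not the exact file contents.

```python
from pyspark import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.sql import Row, SQLContext

sc = SparkContext(appName="CrossValidatorExample")
sqlContext = SQLContext(sc)

# Toy labeled documents (illustrative values).
LabeledDocument = Row("id", "text", "label")
training = sc.parallelize([(0, "a b c d e spark", 1.0),
                           (1, "b d", 0.0),
                           (2, "spark f g h", 1.0),
                           (3, "hadoop mapreduce", 0.0)]) \
    .map(lambda x: LabeledDocument(*x)).toDF()

# Pipeline: tokenize -> hashed term frequencies -> logistic regression.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])

# Parameter grid to search; CrossValidator picks the best combination
# by k-fold cross-validation using the given evaluator.
paramGrid = ParamGridBuilder() \
    .addGrid(hashingTF.numFeatures, [10, 100]) \
    .addGrid(lr.regParam, [0.1, 0.01]) \
    .build()
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=BinaryClassificationEvaluator(),
                          numFolds=2)
cvModel = crossval.fit(training)

# Apply the best model found to unlabeled test documents.
test = sqlContext.createDataFrame([(4, "spark i j k"),
                                   (5, "mapreduce spark")], ["id", "text"])
prediction = cvModel.transform(test)
prediction.select("id", "text", "prediction").show()

sc.stop()
```

The `# $example on$` / `# $example off$` markers added in the diff below delimit the region of the source file that `{% include_example %}` pulls into the generated docs.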
---
 docs/ml-guide.md                               | 5 +++++
 examples/src/main/python/ml/cross_validator.py | 5 ++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/docs/ml-guide.md b/docs/ml-guide.md
index 5900d665b3..a5a825f64e 100644
--- a/docs/ml-guide.md
+++ b/docs/ml-guide.md
@@ -283,6 +283,11 @@ However, it is also a well-established method for choosing parameters which is m
 {% include_example java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java %}
 </div>
 
+<div data-lang="python">
+
+{% include_example python/ml/cross_validator.py %}
+</div>
+
 </div>
 
 ## Example: model selection via train validation split
diff --git a/examples/src/main/python/ml/cross_validator.py b/examples/src/main/python/ml/cross_validator.py
index f0ca97c724..5f0ef20218 100644
--- a/examples/src/main/python/ml/cross_validator.py
+++ b/examples/src/main/python/ml/cross_validator.py
@@ -18,12 +18,14 @@
 from __future__ import print_function
 
 from pyspark import SparkContext
+# $example on$
 from pyspark.ml import Pipeline
 from pyspark.ml.classification import LogisticRegression
 from pyspark.ml.evaluation import BinaryClassificationEvaluator
 from pyspark.ml.feature import HashingTF, Tokenizer
 from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
 from pyspark.sql import Row, SQLContext
+# $example off$
 
 """
 A simple example demonstrating model selection using CrossValidator.
@@ -36,7 +38,7 @@ Run with:
 if __name__ == "__main__":
     sc = SparkContext(appName="CrossValidatorExample")
     sqlContext = SQLContext(sc)
-
+    # $example on$
     # Prepare training documents, which are labeled.
     LabeledDocument = Row("id", "text", "label")
     training = sc.parallelize([(0, "a b c d e spark", 1.0),
@@ -92,5 +94,6 @@ if __name__ == "__main__":
     selected = prediction.select("id", "text", "probability", "prediction")
     for row in selected.collect():
         print(row)
+    # $example off$
 
     sc.stop()
-- 
GitLab