Commit
Added examples for spark.ml: SimpleParamsExample + Java version, CrossValidatorExample + Java version. CrossValidatorExample not working yet. Added programming guide for spark.ml, but need to add CrossValidatorExample to it once CrossValidatorExample works.
Showing 12 changed files with 990 additions and 6 deletions.
(Binary files and large diffs in this commit are not rendered below.)
examples/src/main/java/org/apache/spark/examples/ml/JavaCrossValidatorExample.java (new file: 123 additions, 0 deletions)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.ml;

import java.util.ArrayList;
import java.util.List;

import com.google.common.collect.Lists;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.ml.Model;
import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineStage;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.classification.LogisticRegressionModel;
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator;
import org.apache.spark.ml.feature.HashingTF;
import org.apache.spark.ml.feature.Tokenizer;
import org.apache.spark.ml.param.ParamMap;
import org.apache.spark.ml.tuning.CrossValidator;
import org.apache.spark.ml.tuning.CrossValidatorModel;
import org.apache.spark.ml.tuning.ParamGridBuilder;
import org.apache.spark.sql.api.java.JavaSQLContext;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;

/**
 * A simple example demonstrating model selection using CrossValidator.
 * This example also demonstrates how Pipelines are Estimators.
 *
 * This example uses the Java bean classes {@link org.apache.spark.examples.ml.LabeledDocument} and
 * {@link org.apache.spark.examples.ml.Document} defined in the Scala example
 * {@link org.apache.spark.examples.ml.SimpleTextClassificationPipeline}.
 *
 * Run with
 * <pre>
 * bin/run-example ml.JavaCrossValidatorExample
 * </pre>
 */
public class JavaCrossValidatorExample {

  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaCrossValidatorExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    JavaSQLContext jsql = new JavaSQLContext(jsc);

    // Prepare training documents, which are labeled.
    List<LabeledDocument> localTraining = Lists.newArrayList(
      new LabeledDocument(0L, "a b c d e spark", 1.0),
      new LabeledDocument(1L, "b d", 0.0),
      new LabeledDocument(2L, "spark f g h", 1.0),
      new LabeledDocument(3L, "hadoop mapreduce", 0.0));
    JavaSchemaRDD training =
      jsql.applySchema(jsc.parallelize(localTraining), LabeledDocument.class);

    // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
    Tokenizer tokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words");
    HashingTF hashingTF = new HashingTF()
      .setNumFeatures(1000)
      .setInputCol(tokenizer.getOutputCol())
      .setOutputCol("features");
    LogisticRegression lr = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(0.01);
    Pipeline pipeline = new Pipeline()
      .setStages(new PipelineStage[] {tokenizer, hashingTF, lr});

    // We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance.
    // This will allow us to jointly choose parameters for all Pipeline stages.
    // A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
    CrossValidator crossval = new CrossValidator()
      .setEstimator(pipeline)
      .setEvaluator(new BinaryClassificationEvaluator());
    // We use a ParamGridBuilder to construct a grid of parameters to search over.
    // With 3 values for hashingTF.numFeatures and 2 values for lr.regParam,
    // this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from.
    ParamMap[] paramGrid = new ParamGridBuilder()
      .addGrid(hashingTF.numFeatures(), new int[]{10, 100, 1000})
      .addGrid(lr.regParam(), new double[]{0.1, 0.01})
      .build();
    crossval.setEstimatorParamMaps(paramGrid);
    crossval.setNumFolds(2);

    // Run cross-validation, and choose the best set of parameters.
    CrossValidatorModel cvModel = crossval.fit(training);
    // Get the best LogisticRegression model (with the best set of parameters from paramGrid).
    Model lrModel = cvModel.bestModel();

    // Prepare test documents, which are unlabeled.
    List<Document> localTest = Lists.newArrayList(
      new Document(4L, "spark i j k"),
      new Document(5L, "l m n"),
      new Document(6L, "mapreduce spark"),
      new Document(7L, "apache hadoop"));
    JavaSchemaRDD test = jsql.applySchema(jsc.parallelize(localTest), Document.class);

    // Make predictions on test documents.
    lrModel.transform(test).registerAsTable("prediction");
    JavaSchemaRDD predictions = jsql.sql("SELECT id, text, score, prediction FROM prediction");
    for (Row r: predictions.collect()) {
      System.out.println("(" + r.get(0) + ", " + r.get(1) + ") --> score=" + r.get(2)
        + ", prediction=" + r.get(3));
    }
  }
}
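Note: the LabeledDocument and Document classes used by the Java examples are not defined in this file; as the Javadoc above says, they come from the Scala example SimpleTextClassificationPipeline, which is not rendered in this diff. For orientation only, Java-bean-compatible case classes of that shape would look roughly like the sketch below (the authoritative definitions live in SimpleTextClassificationPipeline.scala):

import scala.beans.BeanInfo

// Sketch of the bean classes the Java examples above rely on; field names match
// the columns used in the examples ("id", "text", "label").
@BeanInfo
case class LabeledDocument(id: Long, text: String, label: Double)

@BeanInfo
case class Document(id: Long, text: String)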
examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java (new file: 111 additions, 0 deletions)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.ml;

import java.util.List;

import com.google.common.collect.Lists;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.ml.classification.LogisticRegressionModel;
import org.apache.spark.ml.param.ParamMap;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.sql.api.java.JavaSQLContext;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;

/**
 * A simple example demonstrating ways to specify parameters for Estimators and Transformers.
 * Run with
 * {{{
 * bin/run-example ml.JavaSimpleParamsExample
 * }}}
 */
public class JavaSimpleParamsExample {

  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("JavaSimpleParamsExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    JavaSQLContext jsql = new JavaSQLContext(jsc);

    // Prepare training data.
    // We use LabeledPoint, which is a case class.  Spark SQL can convert RDDs of case classes
    // into SchemaRDDs, where it uses the case class metadata to infer the schema.
    List<LabeledPoint> localTraining = Lists.newArrayList(
      new LabeledPoint(1.0, Vectors.dense(0.0, 1.1, 0.1)),
      new LabeledPoint(0.0, Vectors.dense(2.0, 1.0, -1.0)),
      new LabeledPoint(0.0, Vectors.dense(2.0, 1.3, 1.0)),
      new LabeledPoint(1.0, Vectors.dense(0.0, 1.2, -0.5)));
    JavaSchemaRDD training = jsql.applySchema(jsc.parallelize(localTraining), LabeledPoint.class);

    // Create a LogisticRegression instance.  This instance is an Estimator.
    LogisticRegression lr = new LogisticRegression();
    // Print out the parameters, documentation, and any default values.
    System.out.println("LogisticRegression parameters:\n" + lr.explainParams() + "\n");

    // We may set parameters using setter methods.
    lr.setMaxIter(10)
      .setRegParam(0.01);

    // Learn a LogisticRegression model.  This uses the parameters stored in lr.
    LogisticRegressionModel model1 = lr.fit(training);
    // Since model1 is a Model (i.e., a Transformer produced by an Estimator),
    // we can view the parameters it used during fit().
    // This prints the parameter (name: value) pairs, where names are unique IDs for this
    // LogisticRegression instance.
    System.out.println("Model 1 was fit using parameters: " + model1.fittingParamMap());

    // We may alternatively specify parameters using a ParamMap.
    ParamMap paramMap = new ParamMap();
    paramMap.put(lr.maxIter(), 20); // Specify 1 Param.
    paramMap.put(lr.maxIter(), 30); // This overwrites the original maxIter.
    paramMap.put(lr.regParam(), 0.1);

    // One can also combine ParamMaps.
    ParamMap paramMap2 = new ParamMap();
    paramMap2.put(lr.scoreCol(), "probability"); // Changes output column name.
    ParamMap paramMapCombined = paramMap.$plus$plus(paramMap2);

    // Now learn a new model using the paramMapCombined parameters.
    // paramMapCombined overrides all parameters set earlier via lr.set* methods.
    LogisticRegressionModel model2 = lr.fit(training, paramMapCombined);
    System.out.println("Model 2 was fit using parameters: " + model2.fittingParamMap());

    // Prepare test documents.
    List<LabeledPoint> localTest = Lists.newArrayList(
      new LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
      new LabeledPoint(0.0, Vectors.dense(3.0, 2.0, -0.1)),
      new LabeledPoint(1.0, Vectors.dense(0.0, 2.2, -1.5)));
    JavaSchemaRDD test = jsql.applySchema(jsc.parallelize(localTest), LabeledPoint.class);

    // Make predictions on test documents using the Transformer.transform() method.
    // LogisticRegression.transform will only use the 'features' column.
    // Note that model2.transform() outputs a 'probability' column instead of the usual 'score'
    // column since we renamed the lr.scoreCol parameter previously.
    model2.transform(test).registerAsTable("results");
    JavaSchemaRDD results =
      jsql.sql("SELECT features, label, probability, prediction FROM results");
    for (Row r: results.collect()) {
      System.out.println("(" + r.get(0) + ", " + r.get(1) + ") -> prob=" + r.get(2)
        + ", prediction=" + r.get(3));
    }
  }
}
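Note: paramMap.$plus$plus(paramMap2) above is simply how Scala's ++ method on ParamMap is spelled when called from Java. A rough Scala equivalent of the same parameter handling, sketched under the assumption that ParamMap's put and ++ behave as the Java calls above suggest, is:

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.param.ParamMap

// Sketch only: mirrors the Java ParamMap handling above in Scala.
val lr = new LogisticRegression()
val paramMap = new ParamMap()
paramMap.put(lr.maxIter, 20)   // Specify 1 Param.
paramMap.put(lr.maxIter, 30)   // This overwrites the original maxIter.
paramMap.put(lr.regParam, 0.1)

val paramMap2 = new ParamMap()
paramMap2.put(lr.scoreCol, "probability")      // Changes output column name.
val paramMapCombined = paramMap ++ paramMap2   // The Scala spelling of $plus$plus.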
examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala (new file: 112 additions, 0 deletions)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.ml

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import org.apache.spark.sql.{Row, SQLContext}

/**
 * A simple example demonstrating model selection using CrossValidator.
 * This example also demonstrates how Pipelines are Estimators.
 *
 * This example uses the [[LabeledDocument]] and [[Document]] case classes from
 * [[SimpleTextClassificationPipeline]].
 *
 * Run with
 * {{{
 * bin/run-example ml.CrossValidatorExample
 * }}}
 */
object CrossValidatorExample {

  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("CrossValidatorExample")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext._

    // Prepare training documents, which are labeled.
    val training = sparkContext.parallelize(Seq(
      LabeledDocument(0L, "a b c d e spark", 1.0),
      LabeledDocument(1L, "b d", 0.0),
      LabeledDocument(2L, "spark f g h", 1.0),
      LabeledDocument(3L, "hadoop mapreduce", 0.0)))

    // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
    val tokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words")
    val hashingTF = new HashingTF()
      .setInputCol(tokenizer.getOutputCol)
      .setOutputCol("features")
    val lr = new LogisticRegression()
      .setMaxIter(10)
    val pipeline = new Pipeline()
      .setStages(Array(tokenizer, hashingTF, lr))

    // We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance.
    // This will allow us to jointly choose parameters for all Pipeline stages.
    // A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
    val crossval = new CrossValidator()
      .setEstimator(pipeline)
      .setEvaluator(new BinaryClassificationEvaluator)
    // We use a ParamGridBuilder to construct a grid of parameters to search over.
    // With 3 values for hashingTF.numFeatures and 2 values for lr.regParam,
    // this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from.
    val paramGrid = new ParamGridBuilder()
      .addGrid(hashingTF.numFeatures, Array(10, 100, 1000))
      .addGrid(lr.regParam, Array(0.1, 0.01))
      .build()
    crossval.setEstimatorParamMaps(paramGrid)
    crossval.setNumFolds(2)

    // Run cross-validation, and choose the best set of parameters.
    val cvModel = try {
      crossval.fit(training)
    } catch {
      case e: Exception =>
        println("\nSTACK TRACE\n")
        println(e.getStackTraceString)
        println("\nSTACK TRACE OF CAUSE\n")
        println(e.getCause.getStackTraceString)
        throw e
    }
    // Get the best LogisticRegression model (with the best set of parameters from paramGrid).
    val lrModel = cvModel.bestModel

    // Prepare test documents, which are unlabeled.
    val test = sparkContext.parallelize(Seq(
      Document(4L, "spark i j k"),
      Document(5L, "l m n"),
      Document(6L, "mapreduce spark"),
      Document(7L, "apache hadoop")))

    // Make predictions on test documents using the best LogisticRegression model.
    lrModel.transform(test)
      .select('id, 'text, 'score, 'prediction)
      .collect()
      .foreach { case Row(id: Long, text: String, score: Double, prediction: Double) =>
        println("(" + id + ", " + text + ") --> score=" + score + ", prediction=" + prediction)
      }
  }
}
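Note: the comments in both CrossValidator examples spell out the size of the search space: 3 values of hashingTF.numFeatures times 2 values of lr.regParam gives 6 candidate ParamMaps, and with setNumFolds(2) the CrossValidator fits 6 x 2 = 12 models before refitting the best setting on the full training data. A minimal, SparkContext-free sketch that checks just the grid construction (the object name ParamGridSizeCheck is illustrative, not part of this commit):

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.tuning.ParamGridBuilder

// Builds the same grid as the examples above and confirms it has 3 x 2 = 6 entries.
object ParamGridSizeCheck {
  def main(args: Array[String]) {
    val hashingTF = new HashingTF()
    val lr = new LogisticRegression()
    val paramGrid = new ParamGridBuilder()
      .addGrid(hashingTF.numFeatures, Array(10, 100, 1000))
      .addGrid(lr.regParam, Array(0.1, 0.01))
      .build()
    println("Number of candidate parameter settings: " + paramGrid.length)  // 6
  }
}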