
Merge pull request #403 from ashelkovnykov/total
Remove dependency on photon-diagnostics module from photon-client
li-ashelkov authored Nov 14, 2018
2 parents 34d54bf + 7381fc7 commit a0f1d82
Showing 20 changed files with 271 additions and 612 deletions.
4 changes: 4 additions & 0 deletions photon-all/build.gradle
@@ -24,6 +24,7 @@ dependencies {
configurations {
runtime {
exclude group: 'log4j'
+ exclude group: 'org.apache.hadoop'
exclude group: 'org.apache.spark'
exclude group: 'org.scala-lang'
exclude group: 'org.slf4j'
@@ -54,7 +55,10 @@ sourcesJar {

configurations {
all {
+ // Remove Snappy JAR which causes downstream issues
exclude group: 'org.xerial.snappy'
+ // Remove JUnit testing dependencies that end up in the jar through Breeze
+ exclude group: 'junit'
}
}

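These excludes keep the named groups out of the photon-all fat jar entirely. A minimal sketch (not part of this commit) of how one could sanity-check the assembled jar: run this with the fat jar on the classpath, and both checks should print false. The class names org.junit.Assert and org.xerial.snappy.Snappy are representative classes from the excluded groups, chosen here for illustration.

object ExclusionCheck {

  // Returns true if the named class can be loaded from the current classpath.
  private def onClasspath(className: String): Boolean =
    try {
      Class.forName(className)
      true
    } catch {
      case _: ClassNotFoundException => false
    }

  def main(args: Array[String]): Unit = {
    println(s"junit present:  ${onClasspath("org.junit.Assert")}")
    println(s"snappy present: ${onClasspath("org.xerial.snappy.Snappy")}")
  }
}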
1 change: 0 additions & 1 deletion photon-api/build.gradle
@@ -17,7 +17,6 @@ apply from: '../build-scripts/integration-test.gradle'

dependencies {
compile(project(":photon-lib$scalaSuffix"))
compile(project(":photon-test-utils$scalaSuffix"))
compile("org.scalanlp:breeze$scalaSuffix:0.11.2")
compile('com.linkedin.paldb:paldb:1.1.0')

1 change: 0 additions & 1 deletion photon-client/build.gradle
@@ -18,7 +18,6 @@ apply from: '../build-scripts/integration-test.gradle'
dependencies {
compile(project(':photon-avro-schemas'))
compile(project(":photon-core$scalaSuffix"))
compile(project(":photon-diagnostics$scalaSuffix"))
compile('joda-time:joda-time:2.7')
// Stub warnings without this
compile('org.joda:joda-convert:1.8.1')

Large diffs are not rendered by default.

45 changes: 1 addition & 44 deletions MockDriver.scala
@@ -18,13 +18,6 @@ import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.testng.Assert._

- import com.linkedin.photon.ml.diagnostics.DiagnosticMode.DiagnosticMode
- import com.linkedin.photon.ml.diagnostics.DiagnosticStatus
- import com.linkedin.photon.ml.diagnostics.bootstrap.BootstrapReport
- import com.linkedin.photon.ml.diagnostics.featureimportance.FeatureImportanceReport
- import com.linkedin.photon.ml.diagnostics.fitting.FittingReport
- import com.linkedin.photon.ml.diagnostics.hl.HosmerLemeshowReport
- import com.linkedin.photon.ml.diagnostics.independence.PredictionErrorIndependenceReport
import com.linkedin.photon.ml.stat.FeatureDataStatistics
import com.linkedin.photon.ml.util.PhotonLogger

@@ -40,10 +33,6 @@ class MockDriver(
override val seed: Long)
extends Driver(params: Params, sc: SparkContext, logger: PhotonLogger, seed) {

- /**
- * Diagnostic status for current run
- */
- private val diagnosticStatus = DiagnosticStatus(trainDiagnosed = false, validateDiagnosed = false)
/**
* Have the input features been summarized
*/
@@ -65,36 +54,6 @@
*/
def metrics: Map[Double, Map[String, Double]] = perModelMetrics

- /**
- *
- */
- override protected def initializeDiagnosticReport(): Unit = {
- diagnosticStatus.trainDiagnosed = false
- diagnosticStatus.validateDiagnosed = false
- super.initializeDiagnosticReport()
- }
-
- /**
- *
- * @return
- */
- override protected def trainDiagnostic(): (Map[Double, FittingReport], Map[Double, BootstrapReport]) = {
- diagnosticStatus.trainDiagnosed = true
- super.trainDiagnostic()
- }
-
- /**
- *
- * @return
- */
- override protected def validateDiagnostic(): (
- Map[Double, (FeatureImportanceReport, FeatureImportanceReport, PredictionErrorIndependenceReport)],
- Map[Double, Option[HosmerLemeshowReport]]) = {
-
- diagnosticStatus.validateDiagnosed = true
- super.validateDiagnostic()
- }
/**
*
* @param outputDir
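
The overrides deleted in the hunk above followed a common test-instrumentation pattern: intercept a protected hook, record that it ran, then delegate to the parent so the real behavior is preserved. A self-contained sketch of that pattern, with illustrative names rather than photon-ml's:

class Driver {
  protected def diagnose(): String = "report"
  def run(): String = diagnose()
}

class MockedDriver extends Driver {
  var diagnoseRan: Boolean = false

  override protected def diagnose(): String = {
    diagnoseRan = true // record the call for later assertions
    super.diagnose()   // preserve the real behavior
  }
}

object PatternDemo {
  def main(args: Array[String]): Unit = {
    val driver = new MockedDriver
    driver.run()
    assert(driver.diagnoseRan, "expected the diagnose hook to have run")
    println("diagnose hook was observed")
  }
}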
@@ -120,16 +79,14 @@ object MockDriver {
* @param expectedNumFeatures The expected number of features in the input data
* @param expectedNumTrainingData The expected number of training records
* @param expectedIsSummarized Whether feature summarization was expected or not
- * @param expectedDiagnosticMode The expected levels of diagnostics run
*/
def runLocally(
args: Array[String],
sc: SparkContext,
expectedStages: Array[DriverStage],
expectedNumFeatures: Int,
expectedNumTrainingData: Int,
- expectedIsSummarized: Boolean,
- expectedDiagnosticMode: DiagnosticMode): MockDriver = {
+ expectedIsSummarized: Boolean): MockDriver = {

// Parse the parameters from command line, should always be the 1st line in main
val params = PhotonMLCmdLineParser.parseFromCommandLine(args)
@@ -148,7 +105,6 @@
assertEquals(job.numTrainingData(), expectedNumTrainingData,
"The number of training data points " + job.numTrainingData() + " do not meet the expectation")
assertEquals(job.isSummarized, expectedIsSummarized)
- assertEquals(job.diagnosticStatus.getDiagnosticMode, expectedDiagnosticMode)

// Closing up
logger.close()
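
With the diagnostic-mode expectation gone, callers of runLocally pass one argument fewer and the verification reduces to three checks. A minimal, runnable sketch of that trimmed flow; the names and values below are illustrative, not from this commit:

object TrimmedChecks {

  final case class JobState(numFeatures: Int, numTrainingData: Int, isSummarized: Boolean)

  private def assertEquals[A](actual: A, expected: A, message: String): Unit =
    assert(actual == expected, s"$message: expected $expected but got $actual")

  def main(args: Array[String]): Unit = {
    val job = JobState(numFeatures = 13, numTrainingData = 250, isSummarized = true)

    // The three checks that remain; the fourth assertion on
    // job.diagnosticStatus.getDiagnosticMode was removed with the module.
    assertEquals(job.numFeatures, 13, "number of features")
    assertEquals(job.numTrainingData, 250, "number of training data points")
    assertEquals(job.isSummarized, true, "summarization flag")
    println("all remaining expectations hold")
  }
}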

This file was deleted.

This file was deleted.

