From b3414a2fef840612cf50bc0406a726d226b1bfc2 Mon Sep 17 00:00:00 2001 From: "Jungtaek Lim (HeartSaVioR)" Date: Wed, 25 Sep 2019 16:30:57 +0900 Subject: [PATCH] Expand to all cases in SQLQueryTestSuite & ThriftServerQueryTestSuite --- .../apache/spark/sql/SQLQueryTestSuite.scala | 25 ++++++++++------- .../ThriftServerQueryTestSuite.scala | 28 +++++++++---------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala index 0acd249d94c9c..c0fe2144cedc2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala @@ -201,30 +201,26 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession { udf: TestUDF) extends TestCase with UDFTest with PgSQLTest protected def createScalaTestCase(testCase: TestCase): Unit = { - // Removing last '.sql' is an workaround of sbt bug which removes test name - // prior to the last dot in JUnitXmlReportPlugin. - // Please refer https://github.com/sbt/sbt/issues/2949 - val refinedTestCaseName = testCase.name.stripSuffix(validFileExtensions) if (blackList.exists(t => - refinedTestCaseName.toLowerCase(Locale.ROOT).contains(t.toLowerCase(Locale.ROOT)))) { + testCase.name.toLowerCase(Locale.ROOT).contains(t.toLowerCase(Locale.ROOT)))) { // Create a test case to ignore this case. 
- ignore(refinedTestCaseName) { /* Do nothing */ } + ignore(testCase.name) { /* Do nothing */ } } else testCase match { case udfTestCase: UDFTest if udfTestCase.udf.isInstanceOf[TestPythonUDF] && !shouldTestPythonUDFs => - ignore(s"${refinedTestCaseName} is skipped because " + + ignore(s"${testCase.name} is skipped because " + s"[$pythonExec] and/or pyspark were not available.") { /* Do nothing */ } case udfTestCase: UDFTest if udfTestCase.udf.isInstanceOf[TestScalarPandasUDF] && !shouldTestScalarPandasUDFs => - ignore(s"${refinedTestCaseName} is skipped because pyspark," + + ignore(s"${testCase.name} is skipped because pyspark," + s"pandas and/or pyarrow were not available in [$pythonExec].") { /* Do nothing */ } case _ => // Create a test case to run this case. - test(refinedTestCaseName) { + test(testCase.name) { runTest(testCase) } } @@ -449,11 +445,20 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession { .replaceAll("\\*\\(\\d+\\) ", "*") // remove the WholeStageCodegen codegenStageIds } + protected def extractTestCaseName(absolutePath: String): String = { + // Removing last '.sql' is a workaround of sbt bug which removes test name + // prior to the last dot in JUnitXmlReportPlugin. 
+ // Please refer https://github.com/sbt/sbt/issues/2949 + absolutePath.stripPrefix(inputFilePath).stripPrefix(File.separator) + .stripSuffix(validFileExtensions) + } + protected def listTestCases(): Seq[TestCase] = { listFilesRecursively(new File(inputFilePath)).flatMap { file => val resultFile = file.getAbsolutePath.replace(inputFilePath, goldenFilePath) + ".out" val absPath = file.getAbsolutePath - val testCaseName = absPath.stripPrefix(inputFilePath).stripPrefix(File.separator) + val testCaseName = extractTestCaseName(absPath) + if (file.getAbsolutePath.startsWith( s"$inputFilePath${File.separator}udf${File.separator}pgSQL")) { Seq(TestScalaUDF("udf"), TestPythonUDF("udf"), TestScalarPandasUDF("udf")).map { udf => diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala index fbcf97c2b6686..fa7adfdf689ba 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala @@ -84,23 +84,23 @@ class ThriftServerQueryTestSuite extends SQLQueryTestSuite { /** List of test cases to ignore, in lower cases. */ override def blackList: Set[String] = Set( - "blacklist.sql", // Do NOT remove this one. It is here to test the blacklist functionality. + "blacklist", // Do NOT remove this one. It is here to test the blacklist functionality. 
// Missing UDF - "pgSQL/boolean.sql", - "pgSQL/case.sql", + "pgSQL/boolean", + "pgSQL/case", // SPARK-28624 - "date.sql", + "date", // SPARK-28620 - "pgSQL/float4.sql", + "pgSQL/float4", // SPARK-28636 - "decimalArithmeticOperations.sql", - "literals.sql", - "subquery/scalar-subquery/scalar-subquery-predicate.sql", - "subquery/in-subquery/in-limit.sql", - "subquery/in-subquery/in-group-by.sql", - "subquery/in-subquery/simple-in.sql", - "subquery/in-subquery/in-order-by.sql", - "subquery/in-subquery/in-set-operations.sql" + "decimalArithmeticOperations", + "literals", + "subquery/scalar-subquery/scalar-subquery-predicate", + "subquery/in-subquery/in-limit", + "subquery/in-subquery/in-group-by", + "subquery/in-subquery/simple-in", + "subquery/in-subquery/in-order-by", + "subquery/in-subquery/in-set-operations" ) override def runQueries( @@ -234,7 +234,7 @@ class ThriftServerQueryTestSuite extends SQLQueryTestSuite { listFilesRecursively(new File(inputFilePath)).flatMap { file => val resultFile = file.getAbsolutePath.replace(inputFilePath, goldenFilePath) + ".out" val absPath = file.getAbsolutePath - val testCaseName = absPath.stripPrefix(inputFilePath).stripPrefix(File.separator) + val testCaseName = extractTestCaseName(absPath) if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}udf")) { Seq.empty