[SQL] Rename Dialect -> ParserDialect.
Author: Reynold Xin <[email protected]>

Closes apache#6071 from rxin/parserdialect and squashes the following commits:

ca2eb31 [Reynold Xin] Rename Dialect -> ParserDialect.
rxin committed May 12, 2015
1 parent: b94a933 · commit: 1669675
Showing 5 changed files with 14 additions and 14 deletions.
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
*
*/
@DeveloperApi
-abstract class Dialect {
+abstract class ParserDialect {
// this is the main function that will be implemented by sql parser.
def parse(sqlText: String): LogicalPlan
}
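
For reference, a third-party parser dialect would subclass this abstract class. The sketch below is illustrative only (the org.example.sql package and the MyParserDialect name are made up); it assumes the Catalyst SqlParser is reused for the actual parsing, mirroring DefaultParserDialect further down, and it keeps a no-arg constructor because SQLContext instantiates the configured dialect reflectively via newInstance().

package org.example.sql

import org.apache.spark.sql.catalyst.{ParserDialect, SqlParser}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

// Hypothetical custom dialect: delegates to the built-in Catalyst parser,
// but could plug in any parser that produces a LogicalPlan.
class MyParserDialect extends ParserDialect {
  @transient
  protected val sqlParser = new SqlParser

  override def parse(sqlText: String): LogicalPlan = sqlParser.parse(sqlText)
}
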
12 changes: 6 additions & 6 deletions sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.catalyst.optimizer.{DefaultOptimizer, Optimizer}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection, expressions}
import org.apache.spark.sql.execution.{Filter, _}
import org.apache.spark.sql.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
@@ -49,7 +49,7 @@ import org.apache.spark.{Partition, SparkContext}

/**
* Currently we support the default dialect named "sql", associated with the class
-* [[DefaultDialect]]
+* [[DefaultParserDialect]]
*
* And we can also provide custom SQL Dialect, for example in Spark SQL CLI:
* {{{
@@ -74,7 +74,7 @@ import org.apache.spark.{Partition, SparkContext}
*-- "hiveql" (for HiveContext)
* }}}
*/
-private[spark] class DefaultDialect extends Dialect {
+private[spark] class DefaultParserDialect extends ParserDialect {
@transient
protected val sqlParser = new catalyst.SqlParser

@@ -176,10 +176,10 @@ class SQLContext(@transient val sparkContext: SparkContext)
@transient
protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_))

-protected[sql] def getSQLDialect(): Dialect = {
+protected[sql] def getSQLDialect(): ParserDialect = {
try {
val clazz = Utils.classForName(dialectClassName)
-clazz.newInstance().asInstanceOf[Dialect]
+clazz.newInstance().asInstanceOf[ParserDialect]
} catch {
case NonFatal(e) =>
// Since we didn't find the available SQL Dialect, it will fail even for SET command:
@@ -209,7 +209,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
protected[sql] val defaultSession = createSession()

protected[sql] def dialectClassName = if (conf.dialect == "sql") {
-classOf[DefaultDialect].getCanonicalName
+classOf[DefaultParserDialect].getCanonicalName
} else {
conf.dialect
}
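
The dialect selected here is driven by the spark.sql.dialect conf: "sql" maps to DefaultParserDialect, "hiveql" maps to HiveQLDialect in HiveContext, and any other value is treated as a fully qualified ParserDialect class name loaded by reflection. A hedged usage sketch, assuming an existing sqlContext and the hypothetical MyParserDialect from the sketch above:

// Point spark.sql.dialect at a custom ParserDialect implementation.
sqlContext.sql("SET spark.sql.dialect=org.example.sql.MyParserDialect")
sqlContext.sql("SELECT 1")   // now parsed by MyParserDialect

// Switch back to the built-in parser.
sqlContext.sql("SET spark.sql.dialect=sql")
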
@@ -30,7 +30,7 @@ import org.apache.spark.sql.test.TestSQLContext.{udf => _, _}
import org.apache.spark.sql.types._

/** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect

class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
// Make sure the tables are loaded.
@@ -94,7 +94,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
newContext.sql("SELECT 1")
}
// test if the dialect set back to DefaultSQLDialect
-assert(newContext.getSQLDialect().getClass === classOf[DefaultDialect])
+assert(newContext.getSQLDialect().getClass === classOf[DefaultParserDialect])
}

test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
@@ -22,7 +22,7 @@ import java.sql.Timestamp
import java.util.{ArrayList => JArrayList}

import org.apache.hadoop.hive.ql.parse.VariableSubstitution
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect

import scala.collection.JavaConversions._
import scala.language.implicitConversions
@@ -54,7 +54,7 @@ import org.apache.spark.util.Utils
/**
* This is the HiveQL Dialect, this dialect is strongly bind with HiveContext
*/
-private[hive] class HiveQLDialect extends Dialect {
+private[hive] class HiveQLDialect extends ParserDialect {
override def parse(sqlText: String): LogicalPlan = {
HiveQl.parseSql(sqlText)
}
@@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.execution

import org.apache.spark.sql.catalyst.analysis.EliminateSubQueries
import org.apache.spark.sql.catalyst.errors.DialectException
-import org.apache.spark.sql.DefaultDialect
+import org.apache.spark.sql.DefaultParserDialect
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SQLConf}
import org.apache.spark.sql.hive.MetastoreRelation
import org.apache.spark.sql.hive.test.TestHive
@@ -53,7 +53,7 @@ case class WindowData(
area: String,
product: Int)
/** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect

/**
* A collection of hive query tests where we generate the answers ourselves instead of depending on
@@ -247,7 +247,7 @@ class SQLQuerySuite extends QueryTest {

// set the dialect back to the DefaultSQLDialect
sql("SET spark.sql.dialect=sql")
-assert(getSQLDialect().getClass === classOf[DefaultDialect])
+assert(getSQLDialect().getClass === classOf[DefaultParserDialect])
sql("SET spark.sql.dialect=hiveql")
assert(getSQLDialect().getClass === classOf[HiveQLDialect])

