-
Notifications
You must be signed in to change notification settings - Fork 28.5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[SPARK-26188][SQL] FileIndex: don't infer data types of partition columns if user specifies schema #23165
[SPARK-26188][SQL] FileIndex: don't infer data types of partition columns if user specifies schema #23165
Changes from 4 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -31,7 +31,7 @@ import org.apache.spark.sql.catalyst.InternalRow | |
import org.apache.spark.sql.catalyst.analysis.{Resolver, TypeCoercion} | ||
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec | ||
import org.apache.spark.sql.catalyst.expressions.{Attribute, Cast, Literal} | ||
import org.apache.spark.sql.catalyst.util.DateTimeUtils | ||
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils} | ||
import org.apache.spark.sql.types._ | ||
import org.apache.spark.sql.util.SchemaUtils | ||
|
||
|
@@ -94,18 +94,34 @@ object PartitioningUtils { | |
paths: Seq[Path], | ||
typeInference: Boolean, | ||
basePaths: Set[Path], | ||
userSpecifiedSchema: Option[StructType], | ||
caseSensitive: Boolean, | ||
timeZoneId: String): PartitionSpec = { | ||
parsePartitions(paths, typeInference, basePaths, DateTimeUtils.getTimeZone(timeZoneId)) | ||
parsePartitions(paths, typeInference, basePaths, userSpecifiedSchema, | ||
caseSensitive, DateTimeUtils.getTimeZone(timeZoneId)) | ||
} | ||
|
||
private[datasources] def parsePartitions( | ||
paths: Seq[Path], | ||
typeInference: Boolean, | ||
basePaths: Set[Path], | ||
userSpecifiedSchema: Option[StructType], | ||
caseSensitive: Boolean, | ||
timeZone: TimeZone): PartitionSpec = { | ||
val userSpecifiedDataTypes = if (userSpecifiedSchema.isDefined) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. can we build this at the caller side out of There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Personally I prefer to make the parameter simple and easy to understand. So that the logic of caller (outside the |
||
val nameToDataType = userSpecifiedSchema.get.fields.map(f => f.name -> f.dataType).toMap | ||
if (!caseSensitive) { | ||
CaseInsensitiveMap(nameToDataType) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. isn't this if There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, thanks for pointing it out :) |
||
} else { | ||
nameToDataType | ||
} | ||
} else { | ||
Map.empty[String, DataType] | ||
} | ||
|
||
// First, we need to parse every partition's path and see if we can find partition values. | ||
val (partitionValues, optDiscoveredBasePaths) = paths.map { path => | ||
parsePartition(path, typeInference, basePaths, timeZone) | ||
parsePartition(path, typeInference, basePaths, userSpecifiedDataTypes, timeZone) | ||
}.unzip | ||
|
||
// We create pairs of (path -> path's partition value) here | ||
|
@@ -147,13 +163,13 @@ object PartitioningUtils { | |
columnNames.zip(literals).map { case (name, Literal(_, dataType)) => | ||
// We always assume partition columns are nullable since we've no idea whether null values | ||
// will be appended in the future. | ||
StructField(name, dataType, nullable = true) | ||
StructField(name, userSpecifiedDataTypes.getOrElse(name, dataType), nullable = true) | ||
} | ||
} | ||
|
||
// Finally, we create `Partition`s based on paths and resolved partition values. | ||
val partitions = resolvedPartitionValues.zip(pathsWithPartitionValues).map { | ||
case (PartitionValues(_, literals), (path, _)) => | ||
case (PartitionValues(columnNames, literals), (path, _)) => | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. unnecessary change? |
||
PartitionPath(InternalRow.fromSeq(literals.map(_.value)), path) | ||
} | ||
|
||
|
@@ -185,6 +201,7 @@ object PartitioningUtils { | |
path: Path, | ||
typeInference: Boolean, | ||
basePaths: Set[Path], | ||
userSpecifiedDataTypes: Map[String, DataType], | ||
timeZone: TimeZone): (Option[PartitionValues], Option[Path]) = { | ||
val columns = ArrayBuffer.empty[(String, Literal)] | ||
// Old Hadoop versions don't have `Path.isRoot` | ||
|
@@ -206,7 +223,7 @@ object PartitioningUtils { | |
// Let's say currentPath is a path of "/table/a=1/", currentPath.getName will give us a=1. | ||
// Once we get the string, we try to parse it and find the partition column and value. | ||
val maybeColumn = | ||
parsePartitionColumn(currentPath.getName, typeInference, timeZone) | ||
parsePartitionColumn(currentPath.getName, typeInference, userSpecifiedDataTypes, timeZone) | ||
maybeColumn.foreach(columns += _) | ||
|
||
// Now, we determine if we should stop. | ||
|
@@ -239,6 +256,7 @@ object PartitioningUtils { | |
private def parsePartitionColumn( | ||
columnSpec: String, | ||
typeInference: Boolean, | ||
userSpecifiedDataTypes: Map[String, DataType], | ||
timeZone: TimeZone): Option[(String, Literal)] = { | ||
val equalSignIndex = columnSpec.indexOf('=') | ||
if (equalSignIndex == -1) { | ||
|
@@ -250,7 +268,16 @@ object PartitioningUtils { | |
val rawColumnValue = columnSpec.drop(equalSignIndex + 1) | ||
assert(rawColumnValue.nonEmpty, s"Empty partition column value in '$columnSpec'") | ||
|
||
val literal = inferPartitionColumnValue(rawColumnValue, typeInference, timeZone) | ||
val literal = if (userSpecifiedDataTypes.contains(columnName)) { | ||
// SPARK-26188: if user provides corresponding column schema, get the column value without | ||
// inference, and then cast it as user specified data type. | ||
val columnValue = inferPartitionColumnValue(rawColumnValue, false, timeZone) | ||
val castedValue = | ||
Cast(columnValue, userSpecifiedDataTypes(columnName), Option(timeZone.getID)).eval() | ||
Literal.create(castedValue, userSpecifiedDataTypes(columnName)) | ||
} else { | ||
inferPartitionColumnValue(rawColumnValue, typeInference, timeZone) | ||
} | ||
Some(columnName -> literal) | ||
} | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
we can remove
combineInferredAndUserSpecifiedPartitionSchema
now