Maximum repeatedly substituted alias size (#475)
https://issues.apache.org/jira/browse/SPARK-26626
apache#23556 

## What changes were proposed in this pull request?

This adds a `spark.sql.maxRepeatedAliasSize` config option, which specifies the maximum size of an aliased expression that may be substituted more than once (in `CollapseProject` and `PhysicalOperation`). This prevents large aliased expressions from being substituted multiple times, which can explode the size of the expression tree and eventually OOM the driver.
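For a concrete picture of the failure mode, here is a minimal sketch (assuming an active SparkSession `spark`; the column names are illustrative) that mirrors the new unit test. Each level redefines `a` and `b` in terms of the previous `a` and `b`, so substituting the alias `a` into both `a + b` and `a - b` roughly doubles the expression tree at every step:

```scala
import org.apache.spark.sql.functions.col

// Every select references the previous level's aliases twice, so naive alias
// substitution doubles the substituted expression tree at each iteration.
var df = spark.range(10).selectExpr("id AS a", "id AS b")
for (_ <- 1 to 100) {
  df = df.select((col("a") + col("b")).as("a"), (col("a") - col("b")).as("b"))
}

// Without a cap, CollapseProject/PhysicalOperation keep inlining the growing aliases
// and planning can OOM the driver; with spark.sql.maxRepeatedAliasSize (default 100),
// substitution stops once a repeated alias exceeds the threshold.
df.explain(true)
```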

The default value of 100 was chosen through testing as the best-performing threshold:

![image](https://user-images.githubusercontent.com/17480705/51204201-dd285300-18b7-11e9-8781-dd698df00389.png)

## How was this patch tested?

Added unit tests and performed manual testing.
j-esse authored and bulldozer-bot[bot] committed Jan 30, 2019
1 parent 8a4a29b commit a51fa9c
Showing 6 changed files with 87 additions and 4 deletions.
FORK.md (5 changes: 4 additions & 1 deletion)
@@ -18,11 +18,14 @@
* core: Broadcast, CoarseGrainedExecutorBackend, CoarseGrainedSchedulerBackend, Executor, MemoryStore, SparkContext, TorrentBroadcast
* kubernetes: ExecutorPodsAllocator, ExecutorPodsLifecycleManager, ExecutorPodsPollingSnapshotSource, ExecutorPodsSnapshot, ExecutorPodsWatchSnapshotSource, KubernetesClusterSchedulerBackend
* yarn: YarnClusterSchedulerBackend, YarnSchedulerBackend

* [SPARK-26626](https://issues.apache.org/jira/browse/SPARK-26626) - Limited the maximum size of repeatedly substituted aliases

# Added

* Gradle plugin to easily create custom docker images for use with k8s
* Filter rLibDir by exists so that daemon.R references the correct file [460](https://github.com/palantir/spark/pull/460)

# Reverted
* [SPARK-25908](https://issues.apache.org/jira/browse/SPARK-25908) - Removal of `monotonically_increasing_id`, `toDegree`, `toRadians`, `approxCountDistinct`, `unionAll`
* [SPARK-25862](https://issues.apache.org/jira/browse/SPARK-25862) - Removal of `unboundedPreceding`, `unboundedFollowing`, `currentRow`
@@ -649,7 +649,8 @@ object CollapseProject extends Rule[LogicalPlan] {

  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case p1 @ Project(_, p2: Project) =>
      if (haveCommonNonDeterministicOutput(p1.projectList, p2.projectList) ||
          hasOversizedRepeatedAliases(p1.projectList, p2.projectList)) {
        p1
      } else {
        p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))
@@ -682,6 +683,28 @@
    }.exists(!_.deterministic))
  }

  private def hasOversizedRepeatedAliases(
      upper: Seq[NamedExpression], lower: Seq[NamedExpression]): Boolean = {
    val aliases = collectAliases(lower)

    // Count how many times each alias is used in the upper Project.
    // If an alias is only used once, we can safely substitute it without increasing the overall
    // tree size.
    val referenceCounts = AttributeMap(
      upper
        .flatMap(_.collect { case a: Attribute => a })
        .groupBy(identity)
        .mapValues(_.size).toSeq
    )

    // Check for any aliases that are used more than once, and are larger than the configured
    // maximum size.
    aliases.exists({ case (attribute, expression) =>
      referenceCounts.getOrElse(attribute, 0) > 1 &&
        expression.treeSize > SQLConf.get.maxRepeatedAliasSize
    })
  }

  private def buildCleanedProjectList(
      upper: Seq[NamedExpression],
      lower: Seq[NamedExpression]): Seq[NamedExpression] = {
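To make the new guard concrete, here is a standalone sketch of the same counting logic using plain strings in place of Catalyst attributes (the names, sizes, and threshold are made up for illustration). An alias blocks the collapse only when it is referenced more than once *and* its defining expression is larger than the configured size:

```scala
// Simplified model of hasOversizedRepeatedAliases: strings stand in for attributes,
// and aliasSizes stands in for the treeSize of each alias's defining expression.
def oversizedRepeatedAlias(
    upperRefs: Seq[String],            // attributes referenced by the upper Project
    aliasSizes: Map[String, Long],     // alias name -> treeSize of its definition
    maxRepeatedAliasSize: Long): Boolean = {
  val referenceCounts = upperRefs.groupBy(identity).mapValues(_.size)
  aliasSizes.exists { case (name, size) =>
    referenceCounts.getOrElse(name, 0) > 1 && size > maxRepeatedAliasSize
  }
}

// "a" is referenced twice and its definition exceeds the threshold, so the Projects
// are left uncollapsed; "b" is small, so it never triggers the guard.
oversizedRepeatedAlias(Seq("a", "a", "b"), Map("a" -> 150L, "b" -> 3L), 100L)  // true
oversizedRepeatedAlias(Seq("a", "b"), Map("a" -> 150L, "b" -> 3L), 100L)       // false
```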
@@ -23,6 +23,7 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.internal.SQLConf

/**
* A pattern that matches any number of project or filter operations on top of another relational
@@ -58,8 +59,13 @@ object PhysicalOperation extends PredicateHelper {
    plan match {
      case Project(fields, child) if fields.forall(_.deterministic) =>
        val (_, filters, other, aliases) = collectProjectsAndFilters(child)
        if (hasOversizedRepeatedAliases(fields, aliases)) {
          // Skip substitution if it could overly increase the overall tree size and risk OOMs.
          (None, Nil, plan, Map.empty)
        } else {
          val substitutedFields = fields.map(substitute(aliases)).asInstanceOf[Seq[NamedExpression]]
          (Some(substitutedFields), filters, other, collectAliases(substitutedFields))
        }

      case Filter(condition, child) if condition.deterministic =>
        val (fields, filters, other, aliases) = collectProjectsAndFilters(child)
@@ -77,6 +83,26 @@
    case a @ Alias(child, _) => a.toAttribute -> child
  }.toMap

  private def hasOversizedRepeatedAliases(fields: Seq[Expression],
      aliases: Map[Attribute, Expression]): Boolean = {
    // Count how many times each alias is used in the fields.
    // If an alias is only used once, we can safely substitute it without increasing the overall
    // tree size.
    val referenceCounts = AttributeMap(
      fields
        .flatMap(_.collect { case a: Attribute => a })
        .groupBy(identity)
        .mapValues(_.size).toSeq
    )

    // Check for any aliases that are used more than once, and are larger than the configured
    // maximum size.
    aliases.exists({ case (attribute, expression) =>
      referenceCounts.getOrElse(attribute, 0) > 1 &&
        expression.treeSize > SQLConf.get.maxRepeatedAliasSize
    })
  }

  private def substitute(aliases: Map[Attribute, Expression])(expr: Expression): Expression = {
    expr.transform {
      case a @ Alias(ref: AttributeReference, name) =>
@@ -87,6 +87,8 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {

  lazy val containsChild: Set[TreeNode[_]] = children.toSet

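  // Number of nodes in this subtree, including this node.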
  lazy val treeSize: Long = children.map(_.treeSize).sum + 1

  private lazy val _hashCode: Int = scala.util.hashing.MurmurHash3.productHash(this)
  override def hashCode(): Int = _hashCode

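The `treeSize` added above is simply the number of nodes in the subtree, counting the node itself. A toy tree with the same recursive definition makes the arithmetic easy to check (illustrative only, not the Catalyst `TreeNode`):

```scala
// Toy tree with the same size definition as the new TreeNode.treeSize.
final case class Node(children: Node*) {
  lazy val treeSize: Long = children.map(_.treeSize).sum + 1
}

val a, b, c = Node()
val add = Node(a, b)        // models a + b        -> treeSize 3
val expr = Node(add, c)     // models (a + b) - c  -> treeSize 5
assert(expr.treeSize == 5)
```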
@@ -1610,6 +1610,15 @@ object SQLConf {
"WHERE, which does not follow SQL standard.")
      .booleanConf
      .createWithDefault(false)

  val MAX_REPEATED_ALIAS_SIZE =
    buildConf("spark.sql.maxRepeatedAliasSize")
      .internal()
      .doc("The maximum size of an alias expression that will be substituted multiple times " +
        "(size is defined by the number of nodes in the expression tree). " +
        "Used by the CollapseProject optimizer and PhysicalOperation.")
      .intConf
      .createWithDefault(100)
}

/**
@@ -2038,6 +2047,8 @@ class SQLConf extends Serializable with Logging {

  def integralDivideReturnLong: Boolean = getConf(SQLConf.LEGACY_INTEGRALDIVIDE_RETURN_LONG)

  def maxRepeatedAliasSize: Int = getConf(SQLConf.MAX_REPEATED_ALIAS_SIZE)

  /** ********************** SQLConf functionality methods ************ */

  /** Set Spark SQL configuration properties. */
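If the default of 100 proves too strict or too lax for a particular workload, the threshold can be tuned like any other SQL config (a sketch assuming an active SparkSession `spark`; note the option is marked `internal()`, so this is mainly useful when debugging plan-size issues):

```scala
// Raise the cap for the current session.
spark.conf.set("spark.sql.maxRepeatedAliasSize", "200")

// Or set it at submit time:
//   spark-submit --conf spark.sql.maxRepeatedAliasSize=200 ...
```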
@@ -138,4 +138,22 @@ class CollapseProjectSuite extends PlanTest {
    assert(projects.size === 1)
    assert(hasMetadata(optimized))
  }

  test("ensure oversize aliases are not repeatedly substituted") {
    var query: LogicalPlan = testRelation
    for (a <- 1 to 100) {
      query = query.select(('a + 'b).as('a), ('a - 'b).as('b))
    }
    val projects = Optimize.execute(query.analyze).collect { case p: Project => p }
    assert(projects.size >= 12)
  }

  test("ensure oversize aliases are still substituted once") {
    var query: LogicalPlan = testRelation
    for (a <- 1 to 20) {
      query = query.select(('a + 'b).as('a), 'b)
    }
    val projects = Optimize.execute(query.analyze).collect { case p: Project => p }
    assert(projects.size === 1)
  }
}
