From c7db274be79f448fda566208946cb50958ea9b1a Mon Sep 17 00:00:00 2001
From: Zongheng Yang
Date: Tue, 29 Jul 2014 15:32:50 -0700
Subject: [PATCH 1/6] [SPARK-2393][SQL] Cost estimation optimization framework for Catalyst logical plans & sample usage.

The idea is that every Catalyst logical plan now carries a `Statistics` class whose fields provide useful estimates of various plan properties (currently, the physical size in bytes). See the implementation in `MetastoreRelation` for an example.

This patch also includes several uses of the estimation interface in the planner. For instance, the planner now uses physical table sizes from the estimation interface to convert an equi-join into a broadcast join, when doing so is beneficial as determined by a size threshold.

Finally, there are a couple of minor accompanying changes:

- Remove the unused `BaseRelation`.
- Make `SparkLogicalPlan` take a `SQLContext` in its second parameter list.

Author: Zongheng Yang

Closes #1238 from concretevitamin/estimates and squashes the following commits:

329071d [Zongheng Yang] Address review comments; turn config name from string to field in SQLConf.
8663e84 [Zongheng Yang] Use BigInt for stat; for logical leaves, by default throw an exception.
2f2fb89 [Zongheng Yang] Fix statistics for SparkLogicalPlan.
9951305 [Zongheng Yang] Remove childrenStats.
16fc60a [Zongheng Yang] Avoid calling statistics on plans if auto join conversion is disabled.
8bd2816 [Zongheng Yang] Add a note on performance of statistics.
6e594b8 [Zongheng Yang] Get size info from metastore for MetastoreRelation.
01b7a3e [Zongheng Yang] Update scaladoc for a field and move it to @param section.
549061c [Zongheng Yang] Remove numTuples in Statistics for now.
729a8e2 [Zongheng Yang] Update docs to be more explicit.
573e644 [Zongheng Yang] Remove singleton SQLConf and move back `settings` to the trait.
2d99eb5 [Zongheng Yang] {Cleanup, use synchronized in, enrich} StatisticsSuite.
ca5b825 [Zongheng Yang] Inject SQLContext into SparkLogicalPlan, removing SQLConf mixin from it.
43d38a6 [Zongheng Yang] Revert optimization for BroadcastNestedLoopJoin (this fixes tests).
0ef9e5b [Zongheng Yang] Use multiplication instead of sum for default estimates.
4ef0d26 [Zongheng Yang] Make Statistics a case class.
3ba8f3e [Zongheng Yang] Add comment.
e5bcf5b [Zongheng Yang] Fix optimization conditions & update scala docs to explain.
7d9216a [Zongheng Yang] Apply estimation to planning ShuffleHashJoin & BroadcastNestedLoopJoin.
73cde01 [Zongheng Yang] Move SQLConf back. Assign default sizeInBytes to SparkLogicalPlan.
73412be [Zongheng Yang] Move SQLConf to Catalyst & add default val for sizeInBytes.
7a60ab7 [Zongheng Yang] s/Estimates/Statistics, s/cardinality/numTuples.
de3ae13 [Zongheng Yang] Add parquetAfter() properly in test.
dcff9bd [Zongheng Yang] Cleanups.
84301a4 [Zongheng Yang] Refactors.
5bf5586 [Zongheng Yang] Typo.
56a8e6e [Zongheng Yang] Prototype impl of estimations for Catalyst logical plans.
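Before the diff, a minimal, self-contained Scala sketch of the pattern described above may help. `Plan`, `Leaf`, `Join`, `BroadcastDemo`, and the literal sizes are hypothetical stand-ins, not the actual Catalyst classes in this patch; only the default product-of-children estimate and the comparison of `sizeInBytes` against the broadcast threshold mirror the real change.

```scala
// Illustrative-only model of the estimation framework: every plan node
// exposes Statistics, leaves supply real sizes, and the planner compares
// sizeInBytes against the auto-broadcast threshold.
case class Statistics(sizeInBytes: BigInt)

sealed trait Plan {
  def children: Seq[Plan]
  // Default estimate: the product of the children's sizes.
  lazy val statistics: Statistics =
    Statistics(children.map(_.statistics.sizeInBytes).product)
}

case class Leaf(sizeOnDisk: BigInt) extends Plan {
  def children = Nil
  override lazy val statistics = Statistics(sizeOnDisk)
}

case class Join(left: Plan, right: Plan) extends Plan {
  def children = Seq(left, right)
}

object BroadcastDemo extends App {
  // Stand-in for spark.sql.autoBroadcastJoinThreshold (default "10000");
  // a non-positive value disables the conversion, as in the patch.
  val threshold = BigInt(10000)
  val join = Join(Leaf(BigInt(1000000)), Leaf(BigInt(5812)))
  // Mirror of the planner's check: broadcast whichever side fits,
  // preferring the right side, else fall back to a shuffled hash join.
  val chosen =
    if (threshold > 0 && join.right.statistics.sizeInBytes <= threshold)
      "BroadcastHashJoin (build right)"
    else if (threshold > 0 && join.left.statistics.sizeInBytes <= threshold)
      "BroadcastHashJoin (build left)"
    else
      "ShuffledHashJoin"
  println(s"estimated join size: ${join.statistics.sizeInBytes}, chosen: $chosen")
}
```

Multiplying rather than summing the children's sizes makes the default estimate err large (a join's output can approach the product of its inputs), so an operator without a real estimate is unlikely to be broadcast by accident; the `defaultSizeInBytes` setting below serves the same purpose for leaves.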
--- .../sql/catalyst/analysis/unresolved.scala | 4 +- .../catalyst/plans/logical/BaseRelation.scala | 24 ----- .../catalyst/plans/logical/LogicalPlan.scala | 22 +++++ .../scala/org/apache/spark/sql/SQLConf.scala | 61 +++++++----- .../org/apache/spark/sql/SQLContext.scala | 20 ++-- .../org/apache/spark/sql/SchemaRDD.scala | 3 +- .../org/apache/spark/sql/SchemaRDDLike.scala | 2 +- .../spark/sql/api/java/JavaSQLContext.scala | 4 +- .../spark/sql/execution/SparkPlan.scala | 18 ++-- .../spark/sql/execution/SparkStrategies.scala | 57 ++++++----- .../org/apache/spark/sql/json/JsonRDD.scala | 11 ++- .../spark/sql/parquet/ParquetRelation.scala | 4 +- .../org/apache/spark/sql/JoinSuite.scala | 2 - .../spark/sql/hive/HiveMetastoreCatalog.scala | 47 ++++++--- .../spark/sql/hive/StatisticsSuite.scala | 95 +++++++++++++++++++ .../hive/execution/HiveComparisonTest.scala | 2 +- .../sql/hive/execution/HiveQuerySuite.scala | 2 +- .../spark/sql/parquet/HiveParquetSuite.scala | 2 +- 18 files changed, 256 insertions(+), 124 deletions(-) delete mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/BaseRelation.scala create mode 100644 sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala index 7abeb032964e1..a0e25775da6dd 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.analysis import org.apache.spark.sql.catalyst.{errors, trees} import org.apache.spark.sql.catalyst.errors.TreeNodeException import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.plans.logical.BaseRelation +import org.apache.spark.sql.catalyst.plans.logical.LeafNode import org.apache.spark.sql.catalyst.trees.TreeNode /** @@ -36,7 +36,7 @@ class UnresolvedException[TreeType <: TreeNode[_]](tree: TreeType, function: Str case class UnresolvedRelation( databaseName: Option[String], tableName: String, - alias: Option[String] = None) extends BaseRelation { + alias: Option[String] = None) extends LeafNode { override def output = Nil override lazy val resolved = false } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/BaseRelation.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/BaseRelation.scala deleted file mode 100644 index 582334aa42590..0000000000000 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/BaseRelation.scala +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql.catalyst.plans.logical
-
-abstract class BaseRelation extends LeafNode {
-  self: Product =>
-
-  def tableName: String
-}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
index edc37e3877c0e..ac85f95b52a2f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
@@ -26,6 +26,25 @@ import org.apache.spark.sql.catalyst.trees
 abstract class LogicalPlan extends QueryPlan[LogicalPlan] {
   self: Product =>
 
+  /**
+   * Estimates of various statistics. The default estimation logic simply lazily multiplies the
+   * corresponding statistic produced by the children. To override this behavior, override
+   * `statistics` and assign it an overridden version of `Statistics`.
+   *
+   * '''NOTE''': concrete and/or overridden versions of statistics fields should pay attention to
+   * the performance of their implementations, since estimations may be triggered in
+   * performance-critical processes such as query planning.
+   *
+   * @param sizeInBytes Physical size in bytes. For non-leaf operators this defaults to the
+   *                    product of the children's `sizeInBytes`; leaf operators must override it,
+   *                    since the `LeafNode` default throws.
+   */
+  case class Statistics(
+    sizeInBytes: BigInt
+  )
+  lazy val statistics: Statistics = Statistics(
+    sizeInBytes = children.map(_.statistics).map(_.sizeInBytes).product
+  )
+
   /**
    * Returns the set of attributes that are referenced by this node
    * during evaluation.
@@ -92,6 +111,9 @@ abstract class LogicalPlan extends QueryPlan[LogicalPlan] {
 abstract class LeafNode extends LogicalPlan with trees.LeafNode[LogicalPlan] {
   self: Product =>
 
+  override lazy val statistics: Statistics =
+    throw new UnsupportedOperationException("default leaf nodes don't have meaningful Statistics")
+
   // Leaf nodes by definition cannot reference any input attributes.
   override def references = Set.empty
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
index 41920c00b5a2c..be8d4e15ec4b0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
@@ -21,17 +21,31 @@
 import java.util.Properties
 
 import scala.collection.JavaConverters._
 
+object SQLConf {
+  val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold"
+  val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions"
+  val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes"
+
+  object Deprecated {
+    val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks"
+  }
+}
+
 /**
- * SQLConf holds mutable config parameters and hints. These can be set and
- * queried either by passing SET commands into Spark SQL's DSL
- * functions (sql(), hql(), etc.), or by programmatically using setters and
- * getters of this class.
+ * A trait that enables the setting and getting of mutable config parameters/hints.
+ *
+ * In the presence of a SQLContext, these can be set and queried by passing SET commands
+ * into Spark SQL's query functions (sql(), hql(), etc.). Otherwise, users of this trait can
+ * modify the hints by programmatically calling the setters and getters of this trait.
  *
- * SQLConf is thread-safe (internally synchronized so safe to be used in multiple threads).
+ * SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
  */
 trait SQLConf {
   import SQLConf._
 
+  @transient protected[spark] val settings = java.util.Collections.synchronizedMap(
+    new java.util.HashMap[String, String]())
+
   /** ************************ Spark SQL Params/Hints ******************* */
   // TODO: refactor so that these hints accessors don't pollute the name space of SQLContext?
 
@@ -40,28 +54,33 @@ trait SQLConf {
 
   /**
    * Upper bound on the sizes (in bytes) of the tables qualified for the auto conversion to
-   * a broadcast value during the physical executions of join operations. Setting this to 0
+   * a broadcast value during the physical executions of join operations. Setting this to -1
    * effectively disables auto conversion.
-   * Hive setting: hive.auto.convert.join.noconditionaltask.size.
+   *
+   * Hive setting: hive.auto.convert.join.noconditionaltask.size, whose default value is also 10000.
    */
-  private[spark] def autoConvertJoinSize: Int = get(AUTO_CONVERT_JOIN_SIZE, "10000").toInt
+  private[spark] def autoBroadcastJoinThreshold: Int =
+    get(AUTO_BROADCASTJOIN_THRESHOLD, "10000").toInt
 
-  /** A comma-separated list of table names marked to be broadcasted during joins. */
-  private[spark] def joinBroadcastTables: String = get(JOIN_BROADCAST_TABLES, "")
+  /**
+   * The default size in bytes to assign to a logical operator's estimation statistics. By
+   * default, it is set to a larger value than `autoBroadcastJoinThreshold`, hence any logical
+   * operator without a properly implemented estimation of this statistic will not be incorrectly
+   * broadcasted in joins.
+   */
+  private[spark] def defaultSizeInBytes: Long =
+    getOption(DEFAULT_SIZE_IN_BYTES).map(_.toLong).getOrElse(autoBroadcastJoinThreshold + 1)
 
   /** ********************** SQLConf functionality methods ************ */
 
-  @transient
-  private val settings = java.util.Collections.synchronizedMap(
-    new java.util.HashMap[String, String]())
-
   def set(props: Properties): Unit = {
-    props.asScala.foreach { case (k, v) => this.settings.put(k, v) }
+    settings.synchronized {
+      props.asScala.foreach { case (k, v) => settings.put(k, v) }
+    }
   }
 
   def set(key: String, value: String): Unit = {
     require(key != null, "key cannot be null")
-    require(value != null, s"value cannot be null for $key")
+    require(value != null, s"value cannot be null for key: $key")
     settings.put(key, value)
   }
 
@@ -90,13 +109,3 @@ trait SQLConf {
     }
   }
 }
-
-object SQLConf {
-  val AUTO_CONVERT_JOIN_SIZE = "spark.sql.auto.convert.join.size"
-  val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions"
-  val JOIN_BROADCAST_TABLES = "spark.sql.join.broadcastTables"
-
-  object Deprecated {
-    val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks"
-  }
-}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index c178dad662532..a136c7b3ffef5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -24,14 +24,14 @@ import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark.annotation.{AlphaComponent, DeveloperApi, Experimental}
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.ScalaReflection
-import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.analysis._
 import
org.apache.spark.sql.catalyst.dsl.ExpressionConversions -import org.apache.spark.sql.catalyst.types._ +import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.optimizer.Optimizer import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.RuleExecutor +import org.apache.spark.sql.catalyst.types._ import org.apache.spark.sql.columnar.InMemoryRelation import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.SparkStrategies @@ -86,7 +86,7 @@ class SQLContext(@transient val sparkContext: SparkContext) * @group userf */ implicit def createSchemaRDD[A <: Product: TypeTag](rdd: RDD[A]) = - new SchemaRDD(this, SparkLogicalPlan(ExistingRdd.fromProductRdd(rdd))) + new SchemaRDD(this, SparkLogicalPlan(ExistingRdd.fromProductRdd(rdd))(self)) /** * Loads a Parquet file, returning the result as a [[SchemaRDD]]. @@ -127,7 +127,7 @@ class SQLContext(@transient val sparkContext: SparkContext) */ @Experimental def jsonRDD(json: RDD[String], samplingRatio: Double): SchemaRDD = - new SchemaRDD(this, JsonRDD.inferSchema(json, samplingRatio)) + new SchemaRDD(this, JsonRDD.inferSchema(self, json, samplingRatio)) /** * :: Experimental :: @@ -170,11 +170,7 @@ class SQLContext(@transient val sparkContext: SparkContext) * @group userf */ def registerRDDAsTable(rdd: SchemaRDD, tableName: String): Unit = { - val name = tableName - val newPlan = rdd.logicalPlan transform { - case s @ SparkLogicalPlan(ExistingRdd(_, _), _) => s.copy(tableName = name) - } - catalog.registerTable(None, tableName, newPlan) + catalog.registerTable(None, tableName, rdd.logicalPlan) } /** @@ -212,7 +208,7 @@ class SQLContext(@transient val sparkContext: SparkContext) case inMem @ InMemoryRelation(_, _, e: ExistingRdd) => inMem.cachedColumnBuffers.unpersist() catalog.unregisterTable(None, tableName) - catalog.registerTable(None, tableName, SparkLogicalPlan(e)) + catalog.registerTable(None, tableName, SparkLogicalPlan(e)(self)) case inMem: InMemoryRelation => inMem.cachedColumnBuffers.unpersist() catalog.unregisterTable(None, tableName) @@ -405,7 +401,7 @@ class SQLContext(@transient val sparkContext: SparkContext) new GenericRow(map.values.toArray.asInstanceOf[Array[Any]]): Row } } - new SchemaRDD(this, SparkLogicalPlan(ExistingRdd(schema, rowRdd))) + new SchemaRDD(this, SparkLogicalPlan(ExistingRdd(schema, rowRdd))(self)) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala index 019ff9d300a18..172b6e0e7f26b 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala @@ -418,7 +418,8 @@ class SchemaRDD( * @group schema */ private def applySchema(rdd: RDD[Row]): SchemaRDD = { - new SchemaRDD(sqlContext, SparkLogicalPlan(ExistingRdd(queryExecution.analyzed.output, rdd))) + new SchemaRDD(sqlContext, + SparkLogicalPlan(ExistingRdd(queryExecution.analyzed.output, rdd))(sqlContext)) } // ======================================================================= diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDDLike.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDDLike.scala index fe81721943202..fd751031b26e5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDDLike.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDDLike.scala @@ -56,7 +56,7 @@ private[sql] trait SchemaRDDLike { // happen right away to let these side effects take 
place eagerly. case _: Command | _: InsertIntoTable | _: InsertIntoCreatedTable | _: WriteToFile => queryExecution.toRdd - SparkLogicalPlan(queryExecution.executedPlan) + SparkLogicalPlan(queryExecution.executedPlan)(sqlContext) case _ => baseLogicalPlan } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala index 790d9ef22cf16..806097c917b91 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala @@ -92,7 +92,7 @@ class JavaSQLContext(val sqlContext: SQLContext) { new GenericRow(extractors.map(e => e.invoke(row)).toArray[Any]): ScalaRow } } - new JavaSchemaRDD(sqlContext, SparkLogicalPlan(ExistingRdd(schema, rowRdd))) + new JavaSchemaRDD(sqlContext, SparkLogicalPlan(ExistingRdd(schema, rowRdd))(sqlContext)) } /** @@ -120,7 +120,7 @@ class JavaSQLContext(val sqlContext: SQLContext) { * @group userf */ def jsonRDD(json: JavaRDD[String]): JavaSchemaRDD = - new JavaSchemaRDD(sqlContext, JsonRDD.inferSchema(json, 1.0)) + new JavaSchemaRDD(sqlContext, JsonRDD.inferSchema(sqlContext, json, 1.0)) /** * Registers the given RDD as a temporary table in the catalog. Temporary tables exist only diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala index 27dc091b85812..77c874d0315ee 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala @@ -19,12 +19,12 @@ package org.apache.spark.sql.execution import org.apache.spark.annotation.DeveloperApi import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Logging, Row} +import org.apache.spark.sql.{Logging, Row, SQLContext} import org.apache.spark.sql.catalyst.trees import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation import org.apache.spark.sql.catalyst.expressions.GenericRow import org.apache.spark.sql.catalyst.plans.QueryPlan -import org.apache.spark.sql.catalyst.plans.logical.BaseRelation +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.plans.physical._ /** @@ -66,8 +66,8 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging { * linking. */ @DeveloperApi -case class SparkLogicalPlan(alreadyPlanned: SparkPlan, tableName: String = "SparkLogicalPlan") - extends BaseRelation with MultiInstanceRelation { +case class SparkLogicalPlan(alreadyPlanned: SparkPlan)(@transient sqlContext: SQLContext) + extends LogicalPlan with MultiInstanceRelation { def output = alreadyPlanned.output override def references = Set.empty @@ -78,9 +78,15 @@ case class SparkLogicalPlan(alreadyPlanned: SparkPlan, tableName: String = "Spar alreadyPlanned match { case ExistingRdd(output, rdd) => ExistingRdd(output.map(_.newInstance), rdd) case _ => sys.error("Multiple instance of the same relation detected.") - }, tableName) - .asInstanceOf[this.type] + })(sqlContext).asInstanceOf[this.type] } + + @transient override lazy val statistics = Statistics( + // TODO: Instead of returning a default value here, find a way to return a meaningful size + // estimate for RDDs. See PR 1238 for more discussions. 
+ sizeInBytes = BigInt(sqlContext.defaultSizeInBytes) + ) + } private[sql] trait LeafNode extends SparkPlan with trees.LeafNode[SparkPlan] { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index c078e71fe0290..404d48ae05b45 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -17,11 +17,13 @@ package org.apache.spark.sql.execution +import scala.util.Try + import org.apache.spark.sql.{SQLContext, execution} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.planning._ import org.apache.spark.sql.catalyst.plans._ -import org.apache.spark.sql.catalyst.plans.logical.{BaseRelation, LogicalPlan} +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.columnar.{InMemoryRelation, InMemoryColumnarTableScan} import org.apache.spark.sql.parquet._ @@ -47,9 +49,18 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { /** * Uses the ExtractEquiJoinKeys pattern to find joins where at least some of the predicates can be * evaluated by matching hash keys. + * + * This strategy applies a simple optimization based on the estimates of the physical sizes of + * the two join sides. When planning a [[execution.BroadcastHashJoin]], if one side has an + * estimated physical size smaller than the user-settable threshold + * [[org.apache.spark.sql.SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]], the planner would mark it as the + * ''build'' relation and mark the other relation as the ''stream'' side. The build table will be + * ''broadcasted'' to all of the executors involved in the join, as a + * [[org.apache.spark.broadcast.Broadcast]] object. If both estimates exceed the threshold, they + * will instead be used to decide the build side in a [[execution.ShuffledHashJoin]]. 
*/ object HashJoin extends Strategy with PredicateHelper { - private[this] def broadcastHashJoin( + private[this] def makeBroadcastHashJoin( leftKeys: Seq[Expression], rightKeys: Seq[Expression], left: LogicalPlan, @@ -61,33 +72,27 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { condition.map(Filter(_, broadcastHashJoin)).getOrElse(broadcastHashJoin) :: Nil } - def broadcastTables: Seq[String] = sqlContext.joinBroadcastTables.split(",").toBuffer - def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { - case ExtractEquiJoinKeys( - Inner, - leftKeys, - rightKeys, - condition, - left, - right @ PhysicalOperation(_, _, b: BaseRelation)) - if broadcastTables.contains(b.tableName) => - broadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildRight) + case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) + if Try(sqlContext.autoBroadcastJoinThreshold > 0 && + right.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold).getOrElse(false) => + makeBroadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildRight) - case ExtractEquiJoinKeys( - Inner, - leftKeys, - rightKeys, - condition, - left @ PhysicalOperation(_, _, b: BaseRelation), - right) - if broadcastTables.contains(b.tableName) => - broadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildLeft) + case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) + if Try(sqlContext.autoBroadcastJoinThreshold > 0 && + left.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold).getOrElse(false) => + makeBroadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildLeft) case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) => + val buildSide = + if (Try(right.statistics.sizeInBytes <= left.statistics.sizeInBytes).getOrElse(false)) { + BuildRight + } else { + BuildLeft + } val hashJoin = execution.ShuffledHashJoin( - leftKeys, rightKeys, BuildRight, planLater(left), planLater(right)) + leftKeys, rightKeys, buildSide, planLater(left), planLater(right)) condition.map(Filter(_, hashJoin)).getOrElse(hashJoin) :: Nil case _ => Nil @@ -273,8 +278,8 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { execution.Limit(limit, planLater(child))(sqlContext) :: Nil case Unions(unionChildren) => execution.Union(unionChildren.map(planLater))(sqlContext) :: Nil - case logical.Except(left,right) => - execution.Except(planLater(left),planLater(right)) :: Nil + case logical.Except(left,right) => + execution.Except(planLater(left),planLater(right)) :: Nil case logical.Intersect(left, right) => execution.Intersect(planLater(left), planLater(right)) :: Nil case logical.Generate(generator, join, outer, _, child) => @@ -283,7 +288,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { execution.ExistingRdd(Nil, singleRowRdd) :: Nil case logical.Repartition(expressions, child) => execution.Exchange(HashPartitioning(expressions, numPartitions), planLater(child)) :: Nil - case SparkLogicalPlan(existingPlan, _) => existingPlan :: Nil + case SparkLogicalPlan(existingPlan) => existingPlan :: Nil case _ => Nil } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala index b48c70ee73a27..6c2b553bb908e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala @@ -28,11 +28,12 @@ import 
org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.types._ import org.apache.spark.sql.execution.{ExistingRdd, SparkLogicalPlan} -import org.apache.spark.sql.Logging +import org.apache.spark.sql.{SQLContext, Logging} private[sql] object JsonRDD extends Logging { private[sql] def inferSchema( + sqlContext: SQLContext, json: RDD[String], samplingRatio: Double = 1.0): LogicalPlan = { require(samplingRatio > 0, s"samplingRatio ($samplingRatio) should be greater than 0") @@ -40,15 +41,17 @@ private[sql] object JsonRDD extends Logging { val allKeys = parseJson(schemaData).map(allKeysWithValueTypes).reduce(_ ++ _) val baseSchema = createSchema(allKeys) - createLogicalPlan(json, baseSchema) + createLogicalPlan(json, baseSchema, sqlContext) } private def createLogicalPlan( json: RDD[String], - baseSchema: StructType): LogicalPlan = { + baseSchema: StructType, + sqlContext: SQLContext): LogicalPlan = { val schema = nullTypeToStringType(baseSchema) - SparkLogicalPlan(ExistingRdd(asAttributes(schema), parseJson(json).map(asRow(_, schema)))) + SparkLogicalPlan( + ExistingRdd(asAttributes(schema), parseJson(json).map(asRow(_, schema))))(sqlContext) } private def createSchema(allKeys: Set[(String, DataType)]): StructType = { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala index 9c4771d1a9846..8c7dbd5eb4a09 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala @@ -27,6 +27,7 @@ import parquet.hadoop.ParquetOutputFormat import parquet.hadoop.metadata.CompressionCodecName import parquet.schema.MessageType +import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, UnresolvedException} import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, LeafNode} @@ -45,7 +46,8 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, LeafNode} */ private[sql] case class ParquetRelation( path: String, - @transient conf: Option[Configuration] = None) extends LeafNode with MultiInstanceRelation { + @transient conf: Option[Configuration] = None) + extends LeafNode with MultiInstanceRelation { self: Product => diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala index e17ecc87fd52a..025c396ef0629 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala @@ -19,8 +19,6 @@ package org.apache.spark.sql import org.apache.spark.sql.TestData._ import org.apache.spark.sql.catalyst.plans.{LeftOuter, RightOuter, FullOuter, Inner} -import org.apache.spark.sql.execution._ -import org.apache.spark.sql.test.TestSQLContext import org.apache.spark.sql.test.TestSQLContext._ class JoinSuite extends QueryTest { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index 156b090712df2..dff1d6a4b93bb 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -19,15 +19,16 @@ package org.apache.spark.sql.hive import 
scala.util.parsing.combinator.RegexParsers +import org.apache.hadoop.fs.Path +import org.apache.hadoop.hive.conf.HiveConf import org.apache.hadoop.hive.metastore.api.{FieldSchema, StorageDescriptor, SerDeInfo} import org.apache.hadoop.hive.metastore.api.{Table => TTable, Partition => TPartition} import org.apache.hadoop.hive.ql.metadata.{Hive, Partition, Table} import org.apache.hadoop.hive.ql.plan.TableDesc -import org.apache.hadoop.hive.ql.session.SessionState import org.apache.hadoop.hive.serde2.Deserializer import org.apache.spark.annotation.DeveloperApi -import org.apache.spark.sql.Logging +import org.apache.spark.sql.{SQLContext, Logging} import org.apache.spark.sql.catalyst.analysis.{EliminateAnalysisOperators, Catalog} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.logical @@ -64,9 +65,8 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with // Since HiveQL is case insensitive for table names we make them all lowercase. MetastoreRelation( - databaseName, - tblName, - alias)(table.getTTable, partitions.map(part => part.getTPartition)) + databaseName, tblName, alias)( + table.getTTable, partitions.map(part => part.getTPartition))(hive) } def createTable( @@ -251,7 +251,11 @@ object HiveMetastoreTypes extends RegexParsers { private[hive] case class MetastoreRelation (databaseName: String, tableName: String, alias: Option[String]) (val table: TTable, val partitions: Seq[TPartition]) - extends BaseRelation { + (@transient sqlContext: SQLContext) + extends LeafNode { + + self: Product => + // TODO: Can we use org.apache.hadoop.hive.ql.metadata.Table as the type of table and // use org.apache.hadoop.hive.ql.metadata.Partition as the type of elements of partitions. // Right now, using org.apache.hadoop.hive.ql.metadata.Table and @@ -264,6 +268,21 @@ private[hive] case class MetastoreRelation new Partition(hiveQlTable, p) } + @transient override lazy val statistics = Statistics( + sizeInBytes = { + // TODO: check if this estimate is valid for tables after partition pruning. + // NOTE: getting `totalSize` directly from params is kind of hacky, but this should be + // relatively cheap if parameters for the table are populated into the metastore. An + // alternative would be going through Hadoop's FileSystem API, which can be expensive if a lot + // of RPCs are involved. Besides `totalSize`, there are also `numFiles`, `numRows`, + // `rawDataSize` keys that we can look at in the future. + BigInt( + Option(hiveQlTable.getParameters.get("totalSize")) + .map(_.toLong) + .getOrElse(sqlContext.defaultSizeInBytes)) + } + ) + val tableDesc = new TableDesc( Class.forName(hiveQlTable.getSerializationLib).asInstanceOf[Class[Deserializer]], hiveQlTable.getInputFormatClass, @@ -275,14 +294,14 @@ private[hive] case class MetastoreRelation hiveQlTable.getMetadata ) - implicit class SchemaAttribute(f: FieldSchema) { - def toAttribute = AttributeReference( - f.getName, - HiveMetastoreTypes.toDataType(f.getType), - // Since data can be dumped in randomly with no validation, everything is nullable. - nullable = true - )(qualifiers = tableName +: alias.toSeq) - } + implicit class SchemaAttribute(f: FieldSchema) { + def toAttribute = AttributeReference( + f.getName, + HiveMetastoreTypes.toDataType(f.getType), + // Since data can be dumped in randomly with no validation, everything is nullable. + nullable = true + )(qualifiers = tableName +: alias.toSeq) + } // Must be a stable value since new attributes are born here. 
 val partitionKeys = hiveQlTable.getPartitionKeys.map(_.toAttribute)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
new file mode 100644
index 0000000000000..a61fd9df95c94
--- /dev/null
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.hive
+
+import scala.reflect.ClassTag
+
+import org.apache.spark.sql.{SQLConf, QueryTest}
+import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
+import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.hive.test.TestHive._
+
+class StatisticsSuite extends QueryTest {
+
+  test("estimates the size of a test MetastoreRelation") {
+    val rdd = hql("""SELECT * FROM src""")
+    val sizes = rdd.queryExecution.analyzed.collect { case mr: MetastoreRelation =>
+      mr.statistics.sizeInBytes
+    }
+    assert(sizes.size === 1)
+    assert(sizes(0).equals(BigInt(5812)),
+      s"expected exact size 5812 for test table 'src', got: ${sizes(0)}")
+  }
+
+  test("auto converts to broadcast hash join, by size estimate of a relation") {
+    def mkTest(
+        before: () => Unit,
+        after: () => Unit,
+        query: String,
+        expectedAnswer: Seq[Any],
+        ct: ClassTag[_]) = {
+      before()
+
+      var rdd = hql(query)
+
+      // Assert that both sides of the join are smaller than the threshold.
+      val sizes = rdd.queryExecution.analyzed.collect {
+        case r if ct.runtimeClass.isAssignableFrom(r.getClass) => r.statistics.sizeInBytes
+      }
+      assert(sizes.size === 2 && sizes.forall(_ <= autoBroadcastJoinThreshold),
+        s"query should contain two relations, each of which has size smaller than " +
+          s"autoBroadcastJoinThreshold")
+
+      // Using `sparkPlan` because for relevant patterns in HashJoin to be
+      // matched, other strategies need to be applied.
+ var bhj = rdd.queryExecution.sparkPlan.collect { case j: BroadcastHashJoin => j } + assert(bhj.size === 1, + s"actual query plans do not contain broadcast join: ${rdd.queryExecution}") + + checkAnswer(rdd, expectedAnswer) // check correctness of output + + TestHive.settings.synchronized { + val tmp = autoBroadcastJoinThreshold + + hql(s"""SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD}=-1""") + rdd = hql(query) + bhj = rdd.queryExecution.sparkPlan.collect { case j: BroadcastHashJoin => j } + assert(bhj.isEmpty, "BroadcastHashJoin still planned even though it is switched off") + + val shj = rdd.queryExecution.sparkPlan.collect { case j: ShuffledHashJoin => j } + assert(shj.size === 1, + "ShuffledHashJoin should be planned when BroadcastHashJoin is turned off") + + hql(s"""SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD}=$tmp""") + } + + after() + } + + /** Tests for MetastoreRelation */ + val metastoreQuery = """SELECT * FROM src a JOIN src b ON a.key = 238 AND a.key = b.key""" + val metastoreAnswer = Seq.fill(4)((238, "val_238", 238, "val_238")) + mkTest( + () => (), + () => (), + metastoreQuery, + metastoreAnswer, + implicitly[ClassTag[MetastoreRelation]] + ) + } + +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala index b4dbf2b115799..6c8fe4b196dea 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala @@ -132,7 +132,7 @@ abstract class HiveComparisonTest answer: Seq[String]): Seq[String] = { def isSorted(plan: LogicalPlan): Boolean = plan match { - case _: Join | _: Aggregate | _: BaseRelation | _: Generate | _: Sample | _: Distinct => false + case _: Join | _: Aggregate | _: Generate | _: Sample | _: Distinct => false case PhysicalOperation(_, _, Sort(_, _)) => true case _ => plan.children.iterator.exists(isSorted) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala index a022a1e2dc70e..50f85289fdad8 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala @@ -21,7 +21,7 @@ import scala.util.Try import org.apache.spark.sql.hive.test.TestHive import org.apache.spark.sql.hive.test.TestHive._ -import org.apache.spark.sql.{SchemaRDD, Row} +import org.apache.spark.sql.{Row, SchemaRDD} case class TestData(a: Int, b: String) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/HiveParquetSuite.scala index 91ad59d7f82c0..3bfe49a760be5 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/HiveParquetSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/HiveParquetSuite.scala @@ -35,7 +35,7 @@ class HiveParquetSuite extends FunSuite with BeforeAndAfterAll with BeforeAndAft override def beforeAll() { // write test data - ParquetTestData.writeFile + ParquetTestData.writeFile() testRDD = parquetFile(ParquetTestData.testDir.toString) testRDD.registerAsTable("testsource") } From 2c356665c986564482ccfb3f880f0a2c023a7cb7 Mon Sep 17 00:00:00 2001 From: Patrick Wendell Date: Tue, 29 Jul 2014 17:52:48 -0700 Subject: [PATCH 2/6] MAINTENANCE: Automated closing of pull requests. 
This commit exists to close the following pull requests on Github: Closes #740 (close requested by 'rxin') Closes #647 (close requested by 'rxin') Closes #1383 (close requested by 'rxin') Closes #1485 (close requested by 'pwendell') Closes #693 (close requested by 'rxin') Closes #478 (close requested by 'JoshRosen') From 39b8193102ebf32ef6b40631a949318b281d44a1 Mon Sep 17 00:00:00 2001 From: Michael Armbrust Date: Tue, 29 Jul 2014 18:14:20 -0700 Subject: [PATCH 3/6] [SPARK-2716][SQL] Don't check resolved for having filters. For queries like `... HAVING COUNT(*) > 9` the expression is always resolved since it contains no attributes. This was causing us to avoid doing the Having clause aggregation rewrite. Author: Michael Armbrust Closes #1640 from marmbrus/havingNoRef and squashes the following commits: 92d3901 [Michael Armbrust] Don't check resolved for having filters. --- .../sql/catalyst/analysis/Analyzer.scala | 2 +- ...erences-0-d2de3ba23759d25ef77cdfbab72cbb63 | 136 ++++++++++++++++++ .../sql/hive/execution/HiveQuerySuite.scala | 3 + 3 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 sql/hive/src/test/resources/golden/having no references-0-d2de3ba23759d25ef77cdfbab72cbb63 diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala index 02bdb64f308a5..74c0104e5b17f 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala @@ -159,7 +159,7 @@ class Analyzer(catalog: Catalog, registry: FunctionRegistry, caseSensitive: Bool object UnresolvedHavingClauseAttributes extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case filter @ Filter(havingCondition, aggregate @ Aggregate(_, originalAggExprs, _)) - if !filter.resolved && aggregate.resolved && containsAggregate(havingCondition) => { + if aggregate.resolved && containsAggregate(havingCondition) => { val evaluatedCondition = Alias(havingCondition, "havingCondition")() val aggExprsWithHaving = evaluatedCondition +: originalAggExprs diff --git a/sql/hive/src/test/resources/golden/having no references-0-d2de3ba23759d25ef77cdfbab72cbb63 b/sql/hive/src/test/resources/golden/having no references-0-d2de3ba23759d25ef77cdfbab72cbb63 new file mode 100644 index 0000000000000..3f2cab688ccc2 --- /dev/null +++ b/sql/hive/src/test/resources/golden/having no references-0-d2de3ba23759d25ef77cdfbab72cbb63 @@ -0,0 +1,136 @@ +0 +5 +12 +15 +18 +24 +26 +35 +37 +42 +51 +58 +67 +70 +72 +76 +83 +84 +90 +95 +97 +98 +100 +103 +104 +113 +118 +119 +120 +125 +128 +129 +134 +137 +138 +146 +149 +152 +164 +165 +167 +169 +172 +174 +175 +176 +179 +187 +191 +193 +195 +197 +199 +200 +203 +205 +207 +208 +209 +213 +216 +217 +219 +221 +223 +224 +229 +230 +233 +237 +238 +239 +242 +255 +256 +265 +272 +273 +277 +278 +280 +281 +282 +288 +298 +307 +309 +311 +316 +317 +318 +321 +322 +325 +327 +331 +333 +342 +344 +348 +353 +367 +369 +382 +384 +395 +396 +397 +399 +401 +403 +404 +406 +409 +413 +414 +417 +424 +429 +430 +431 +438 +439 +454 +458 +459 +462 +463 +466 +468 +469 +478 +480 +489 +492 +498 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala index 50f85289fdad8..aadfd2e900151 100644 --- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala @@ -30,6 +30,9 @@ case class TestData(a: Int, b: String) */ class HiveQuerySuite extends HiveComparisonTest { + createQueryTest("having no references", + "SELECT key FROM src GROUP BY key HAVING COUNT(*) > 1") + createQueryTest("boolean = number", """ |SELECT From 86534d0f5255362618c05a07b0171ec35c915822 Mon Sep 17 00:00:00 2001 From: Michael Armbrust Date: Tue, 29 Jul 2014 18:20:51 -0700 Subject: [PATCH 4/6] [SPARK-2631][SQL] Use SQLConf to configure in-memory columnar caching Author: Michael Armbrust Closes #1638 from marmbrus/cachedConfig and squashes the following commits: 2362082 [Michael Armbrust] Use SQLConf to configure in-memory columnar caching --- sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala | 4 ++++ sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala index be8d4e15ec4b0..5d85a0fd4eebb 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala @@ -22,6 +22,7 @@ import java.util.Properties import scala.collection.JavaConverters._ object SQLConf { + val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed" val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold" val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions" val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes" @@ -49,6 +50,9 @@ trait SQLConf { /** ************************ Spark SQL Params/Hints ******************* */ // TODO: refactor so that these hints accessors don't pollute the name space of SQLContext? + /** When true tables cached using the in-memory columnar caching will be compressed. */ + private[spark] def useCompression: Boolean = get(COMPRESS_CACHED, "false").toBoolean + /** Number of partitions to use for shuffle operators. */ private[spark] def numShufflePartitions: Int = get(SHUFFLE_PARTITIONS, "200").toInt diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index a136c7b3ffef5..c2bdef732372c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -192,8 +192,6 @@ class SQLContext(@transient val sparkContext: SparkContext) currentTable.logicalPlan case _ => - val useCompression = - sparkContext.conf.getBoolean("spark.sql.inMemoryColumnarStorage.compressed", false) InMemoryRelation(useCompression, executePlan(currentTable).executedPlan) } From 22649b6cde8e18f043f122bce46f446174d00f6c Mon Sep 17 00:00:00 2001 From: Josh Rosen Date: Tue, 29 Jul 2014 19:02:06 -0700 Subject: [PATCH 5/6] [SPARK-2305] [PySpark] Update Py4J to version 0.8.2.1 Author: Josh Rosen Closes #1626 from JoshRosen/SPARK-2305 and squashes the following commits: 03fb283 [Josh Rosen] Update Py4J to version 0.8.2.1. 
--- LICENSE | 4 ++-- bin/pyspark | 2 +- bin/pyspark2.cmd | 2 +- core/pom.xml | 2 +- .../apache/spark/api/python/PythonUtils.scala | 2 +- python/lib/py4j-0.8.1-src.zip | Bin 37662 -> 0 bytes python/lib/py4j-0.8.2.1-src.zip | Bin 0 -> 37562 bytes sbin/spark-config.sh | 2 +- sbin/spark-executor | 2 +- 9 files changed, 8 insertions(+), 8 deletions(-) delete mode 100644 python/lib/py4j-0.8.1-src.zip create mode 100644 python/lib/py4j-0.8.2.1-src.zip diff --git a/LICENSE b/LICENSE index 65e1f480d9b14..76a3601c66918 100644 --- a/LICENSE +++ b/LICENSE @@ -272,7 +272,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ======================================================================== -For Py4J (python/lib/py4j0.7.egg and files in assembly/lib/net/sf/py4j): +For Py4J (python/lib/py4j-0.8.2.1-src.zip) ======================================================================== Copyright (c) 2009-2011, Barthelemy Dagenais All rights reserved. @@ -532,7 +532,7 @@ The following components are provided under a BSD-style license. See project lin (New BSD license) Protocol Buffer Java API (org.spark-project.protobuf:protobuf-java:2.4.1-shaded - http://code.google.com/p/protobuf) (The BSD License) Fortran to Java ARPACK (net.sourceforge.f2j:arpack_combined_all:0.1 - http://f2j.sourceforge.net) (The BSD License) xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net) - (The New BSD License) Py4J (net.sf.py4j:py4j:0.8.1 - http://py4j.sourceforge.net/) + (The New BSD License) Py4J (net.sf.py4j:py4j:0.8.2.1 - http://py4j.sourceforge.net/) (Two-clause BSD-style license) JUnit-Interface (com.novocode:junit-interface:0.10 - http://github.com/szeiger/junit-interface/) (ISC/BSD License) jbcrypt (org.mindrot:jbcrypt:0.3m - http://www.mindrot.org/) diff --git a/bin/pyspark b/bin/pyspark index 69b056fe28f2c..39a20e2a24a3c 100755 --- a/bin/pyspark +++ b/bin/pyspark @@ -52,7 +52,7 @@ export PYSPARK_PYTHON # Add the PySpark classes to the Python path: export PYTHONPATH=$SPARK_HOME/python/:$PYTHONPATH -export PYTHONPATH=$SPARK_HOME/python/lib/py4j-0.8.1-src.zip:$PYTHONPATH +export PYTHONPATH=$SPARK_HOME/python/lib/py4j-0.8.2.1-src.zip:$PYTHONPATH # Load the PySpark shell.py script when ./pyspark is used interactively: export OLD_PYTHONSTARTUP=$PYTHONSTARTUP diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd index 0ef9eea95342e..2c4b08af8d4c3 100644 --- a/bin/pyspark2.cmd +++ b/bin/pyspark2.cmd @@ -45,7 +45,7 @@ rem Figure out which Python to use. 
 if [%PYSPARK_PYTHON%] == [] set PYSPARK_PYTHON=python
 
 set PYTHONPATH=%FWDIR%python;%PYTHONPATH%
-set PYTHONPATH=%FWDIR%python\lib\py4j-0.8.1-src.zip;%PYTHONPATH%
+set PYTHONPATH=%FWDIR%python\lib\py4j-0.8.2.1-src.zip;%PYTHONPATH%
 
 set OLD_PYTHONSTARTUP=%PYTHONSTARTUP%
 set PYTHONSTARTUP=%FWDIR%python\pyspark\shell.py
diff --git a/core/pom.xml b/core/pom.xml
index a24743495b0e1..4f061099a477d 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -275,7 +275,7 @@
       <groupId>net.sf.py4j</groupId>
       <artifactId>py4j</artifactId>
-      <version>0.8.1</version>
+      <version>0.8.2.1</version>
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
index 6d3e257c4d5df..52c70712eea3d 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
@@ -29,7 +29,7 @@ private[spark] object PythonUtils {
     val pythonPath = new ArrayBuffer[String]
     for (sparkHome <- sys.env.get("SPARK_HOME")) {
       pythonPath += Seq(sparkHome, "python").mkString(File.separator)
-      pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.8.1-src.zip").mkString(File.separator)
+      pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.8.2.1-src.zip").mkString(File.separator)
     }
     pythonPath ++= SparkContext.jarOfObject(this)
     pythonPath.mkString(File.pathSeparator)
diff --git a/python/lib/py4j-0.8.1-src.zip b/python/lib/py4j-0.8.1-src.zip
deleted file mode 100644
index 2069a328d1f2e6a94df057c6a3930048ae3f3832..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 37662
[base85 binary payload (37662 bytes) omitted; the patch series is truncated here]
zdsfjR_rVppF>uTIL4G9lth6DLpWTF$=dRHB;}{lSo{uu)HK>*<>vc(aSW zxON%WyLgub+hj`gr@tbek;qisoEaKckAb*wnZ*jsa4u|T+kOXV1qdw*O?H?j#G?Pc zL?9d+HbcO|dU~TuqJ~6bP?Z4kb=d4^GF5MB^UgRf)gBG+=MDdF&m1^m>grFvqo1BAHcw+`>;k;d!z*W+10IECnyt;>Y5sK~bFh_KOESB+Ln%6swh zPLKUX3BKugg4d(X*zHDjw@mfM@qQn;w^}D&*wtWGhsV7YDz_Scn+GK<)VYR7h*dVf z#}zcs3pf4fR)9}*^f_g%;wH_nV$_ zCTqSu*pR2Kiyx&&X15da6u9Ipp#$ z-}f6hp-3OhcFxcNfuQ#F&j;9qQ7tB8oSueiB~-&8hl`@{cXKK>ns#Q3t0-76RtpeZ z>BW@ho5>XgirR70FOigEl`s>VB^Mqkp!c|f4XpnCa^P7#S6NDOMledfFnXrIGQ*SE{fu&4%l3xpwY+oq|co52*I`t=V(N`5SH*KjcCc>5xVB4 zxVHtW0PzSqKu|)qt0?9a2C^w&Fc+z)EJ`b5%PKO86&JugKtK5RFYC|ysYB2F0FU|7 z6+iGdu1e!*PD0wcEjmQqO!2DR8A=8%H)Z{l;Egrl5ttC=$0*|E<#YE}BkqyrMm`Ytb6wibrgEU`c&sU)6nKoZaN$csA$~lT8iYv1z*T+(I=Q z(bPS`U@v#gnNPrx*|Lh9Lyl9vY!EeUVwuW zk#=nr!{tk1i{i3w>FB0roLs#tb|qM4d#mLQp=_%LOT^IQm^4_Qw4Td5Ms6p%>b8k} z){A+IHs)K-|M~Hyh1mxCBBJh->R?5qiM@V?(UvM=6|`x-Ey%r-FEq(Vdn6V>sg^uA zFdfpEGu=)%F0j%&+NKzj>pZdq$P43y&fXwL=gTG6%Z&x=$K}W_YZNckez~Qw(iG%( z!jbcN}+u6|UdTYU(t zji?bv@#oek_%{t>{3amOsLd%1dzL{SJL|YW6YMYlh-l!NLb0rPaLab}5X3D!ysnRZ zRaXy(Blyg9NHo#4htz@BgO+NwZz&Fl%xJd$k|Wg2l7A54_$9F0FMlLm>hoq0DYp}K zzJys@&>M=)a?+iPT1ss22%ed~kAbjt*%C{hrjNrLi($*LKEeTlUu`302pYoaXRf-b zQSSYEN=)-RK9(kbn0#WU=*TIZX!yNw!S&XLgw#4znNjO{4afAU>SJQII_p^ZVnLlP zT9mrT+RL_$9P%hzY!Umst}c(oN?JC6w#?1r!w(tM5<3RLl%2|W`A9ECHX3im_!@;` zDRlCp1$#R@S#H6M=m~`qXnylx&ezZwnj2YUA;uaYpgo_t{uTI9-wqKOjhQ|cOcvJ1 zy2pNf#|jB}{@zffQR{wdq0u7j>DMItP-I7Cw-kyfSQbtzL_D zSrFL0-LgLOHs!if(PzociS^P1!j>mAsox+Y#`~+r4RakeE&|>nb(5@(ru2+Bs zCyb{CW3;wsYn5uiPy4L2tNw}IJ0(4cH3tq~$p+@^b94Yt=pRlwrW*s50KXcZ#l|2t zIm&}Uh<1kRFu&`nJm;)xr4T^cx$)d*1r!BvoN8nBZDqFro~q$j(hPI?X|->J$Q@)|wG-eGpOtk5rWS^g@X#U0NuRK9raI@)LZa-mfLty+C!) zX{6uNOt9}~p}Ut3Ch%_gIb7O+M9pq!(5maCELo7e(TqyA47jdTSdG9-k_2C2kt3EQ zg(=?XAXIu7qB6N=%?WW@#~GrP75h4}pZ^yCMJ+V`mnw-6WGsE1jeC&CG>*#_bbeuz z-EqJDMOi_wiw_Yjh2T#0p`B0IOvhjnJS?-Hjr8GkzqvxQp!M?4573Nmz40YX$3f@q zZExnhEW<)2mOB}S&5H1`MFy&?Avzdaz=a5QCs8ODt*Y|_SaMhyQ&kIfLZR3RXW}|* za*l#e+|G~xMMZSM7#o6IJbRi96*~fU*kv~0e#FeQ$0)-Jhq;II*dq!__KcXH4p_aD zB0yeEDEuccIVFEoX~U<3p11E&f}YiQ*YQpBquIfdGEQE}w)II=KtakQ=5T^*z|P8L z@ceIK1|(;;w(THqd4M0(K;W&1_dSiinu*qx;>~&5QZvB?1%xz%?P7&k9 zuKdP~1pbwa?C4UrA=$fLVA+q%>P*$e(r9ZRjGvWaI?2N;sPx!HRmR;bKPMrr(SPym zNZgJ*=~p^WtP~^!YI~jcOF{zr%)%G_u{n09DG0E~^F^w6rXU2|m=SP^zT@@BL(+H% zJw(5x&BAS_pW&_Q06XKykvMJ4aQK+iYdckY!TG(d$>aIJsl9uqXP%ot!~MzTNzxRq zf3JTbp-N{D2Oqw!4!bge<|z^?2UcuPFPo$&k3OuRpXImxkCdNmwfi3~tQwEM zSxBTghWSk&TR^CL)ohfA3m@jrTP8$y_Wi6T-ZY3BI8#I)bj%QI{*<4WzMM z<~2q27MVp3>kAA*niCGG8}?ryWX3>1&M$<~V<|~)%5~#8uhrD&t|qq(cgwe$1iptd z#!v@TIjY+2h=s>GGk(z`&L|)L^?ISm-h^pI4{wBG+ip8i& z!_<`*n@fZ(LlIdfdfyQtq*K$mrN1h`i6$J{jyxA1luerRL*8xOoS+OTkK6ZIP zyamOL8gMVZqh6$VI7X69Vs|W_g;efyp^)vOf+NlFpm^pA8@@c^%uuK8S~E+DM(M@E z@b7>%g!t-5uP~4gFr}|1oZVJQn1c}fRuTiRCGB;t>1-Nx$^4{B%E3E@0MamQaBdhVGOah$x_YNsdP6?(Rz1`rzg=zXH`steA zBow7H#oi6;E|%A@zNoDHRWOJY1XmcE?DXA|#0gs4B4e2r`{%;=LoBJ6!SdKs(*ea$ zx2v_mHfJZXkcXtePx2N*fyxd$?OV;`@2y19WF@(c;VC%f^dkkr##!<(5rUPTWUnX@ zVboo~5Yd6XccMt(5ptBL{D7aLTS!ZWATlmqMuq6owGd>O zWkd!PoyDNDm8e-PLJB&8gkJK~rkU*&AH;LPlS^U~GLf2OfBoy4R7#j-uWG@?C+tk} zjl=qy9@2zY+v7ehWGX7aYtT2_Q6nr!sa(Vd5g!^sJq$yGG?%#pUTH|v$5uON1)e3o zcN%+kJ6mpYoBG>#mofyLqm3ADv^=Uw0k|GRgKv%itpFUZFEWvrbO{2gv z=d2az$JXy7Iw7Zxy9c5js=B-+TkSL#Gv=eZgUUkwS&!Wmc&bvEbOLFR)z1;Y7NkpL z`mzh#VCj>0#q4vQ9Bdyn8Np?QfG<(Xe`%LXT`~llHHo51Q&$&!L>Y^?>W4O+j+;x` zW0$~{LggDeIm@U)O`EI*B?TYjbL5i4yf=l=_t)e4!08 zaB@fKl~<1~Q%OH%c_2#QSmw`57SK z8V8#yyIBR9TUpdp>&Y}nKFW!>yRR4%o@j3+sG!r~5k8AC^I-(yZ4(E zv+k34OA?rrL~pb0RC*p&8}G*#)R}E~qR73G)@Dn|M|A$-lPzy(mG^kqa-aYaTE-XUlrt(=GhT`uFwv*76( 
zp+yxC)g71b(r23-<;{VHq1V7RFB8im*4OWXU+{(+rS9Mrb(^hTL1Q4ES*tZt4&k%| z;IG$j6G-bZ!zi6??Jh~#K&>g4477M2+5&^eUlqW2!d_bE1VqkTQWzrxHshcH89EyN zwu?=@J-+{Ypd0P&WCIWq^tt6q>m3O#y>UwTlwet`yGc1P7Ip4Ucel7`qFgNr@Fnh+ zvWQ58Bh(Uyu-`tH4fMoOS@lLUi0y35Ch)d;54W8-s9tf|*B@By1t+mrtd)|x?2|=c z9xDx)pjM@b5h1k-H`4S_6CQ;UM|9$hjixdW#+?a6AV-_C+Bt!MFKINl!023(ukm*; z0*Ag`U=8)6X7Nhr4z%rIlFBiJa=GGy&ccLZxS$FPqXMZ7DAG)1!z$8V7ST@t87MV2 zhg??&4-zZPT_ELTn}4XBO0wgSLiz!6qdRYlCV z=eER-TjAIp@3t`kQ*C@}3e4V?Ac|ZP!a4(|k1LGWULv+;;rbXCw8uPq*n|<1LoBEx z5aX0(et_@DuY;{s)0&!*wxCwZ*&rsH;;cd$=6RB~PWy1p5idL|@-*+d&JeI^N_?xe z-XTu&sf~9RVvER~pX*Yv;WDh&*5pXQoY*#AgqAXsH}P^6mc^xP)xV+!b|7X5jJc zd1?l|uan!*Q9z|H5X`VQ&}3Bx9d7=Qf3YqE+^aY?iH`@L40;Rx!sWJSxTHyQ!cO5P zH@gU$K0V$>_=CENYMKnV*xLkyuS`~xL$i33&BMg>9yNWj;97y=!W(ZaYeOUnoH(bZ znPGYETGS;$L%l8)5hLx2zE>FW8J>k0JuXNSnb>GN*CZl(oy-L9sj%+@Zgq0RW*P1z zCSY&2R7i2gR-^F7M%yfS4R|vv2^gAuZT_kWxsc+-p5eudUiPayo#yy}uQczTO1b-$ zFs1VcgG4{tch^iM(L+Wn>PACOL(p?j_K9>KMj%Hn0h z;BS;TrMFg8xO;P0X4KGF zGYsA0MF1jEiI`b!Y(TL^z9<%Wh6jqy3^wlzoQPeV0pMlpH&M)->W$owSxpS~QC2OMPP zvchHndw;z~eXRh{qromXiK^aBC9Sm3({(Sh8=9Ek4ish03w(Dkz-~Dx@ye6EN94+3 zs-fGoIy*;j;iUgIM&JFC-<#x6hz@Ydsl6ZTVizTOkUGKi3A6qK?|Sk7mD?Im7kU&i zQ6@UDyMFd6_ZDPG@-6p`rSY;~shW?1oym22rq0C}6j0_wtRT2ky3rE*a70^~%5=t; zk8MV7MqAPD6#0b-E$QGD73@SyqP02b+RNPjy0jy~A@VYogcyZWUJd=Uz$&>UZdXPG z;JQQ!hpoYqmhUt0Z>X6){2i88KpEu7y_^(00o4Q|(alOQ0KmqNz{}I67MwAsBf-o$ zM+)Z|i+qsu)N!nJ0_oeBlnyQ|wW})860uEkM%$JKB8WZSA1_pG7T}9id6A6^7BU85 zOimqN6)9P%8W7QV$Y%w-rI8PM|5&zHKr9epR_^fY2E0+>@biN@x# z5GMwGr$EI+g2PU^RN=PMU$MHDD zqPMonw8D+tWHivzojcefxyZrB``3jg4L1j1T;;y6BO_{aN=@r>@C3W`Y+T|rtKnKd z*4Zqe)NH?@p(yzU6mxwcFB9rTb`gkSo0B^#8D2xd7Hifk>Nl zgs~$0R<9f9%IZku3Dxb(VQ62hhs#5WEsTBtKpoaE&`V97cCUMuz74p#BWrgD-r=Sl zmz$o=4DeO6dsYB$y^!q}eR6NEDdREs_hH804BolL=bFyJ#6un`ABkx3#}y_`+TT2G z!p19p)5%1QMGg=E@j`T}9p^ye`{u<;1I=WQ06K8j+v+938N(=rMycd1st%R>y0W8Em=8mgZs;IEPe# z9+UU#bJyj0VMlB8RSq8q;QFp~cf&3}NHT5H{Rc-^As~j0e~8RwDh>3^)s}3R+cev} z)%^AIm9yO&K?oU&E8t?W#%B0lXZP2n^?Kk}pXoMGkfE{%*b%n0Pu8rGi76e+Au^#?)>c*z)=lYcXf!k1k)34K%awI(iFN&+DQz28qvYs9U+%_H zrksS)nIAc~cTy*=ZtxgepLm(o=X_o_W6P#9J=-ITwzrHX8@8g1PfTckCryZ*l!ZTq zEuSM)XMyAxg z$|@zng@(V7XJ=*5P{;L?C^uJFO>H-9Cv!5#)tS`ejHrkq>=MTxXbqvHEg}scA2{eA zr&`W;1v)ZS)O)x(J-i>b5Z~@m71XXbD)DH6q-BP-PyMgkYw(1_o6FdwYaV;IWPan< z&~xMF7CnLHTc~QM%bu|Wr>pOn^fmTXi-ouluIB2R8&G0p3w5!*7#&9wP-%MO*q6ts zb^%q#_Zug=TuOBIHt7cYXevzbYRwbI7x=tQ<(W6%GPR*aSyhk6*6u#A$|X`-mrU%_ zn_G0SJABkF1aXMh<3ZzoS|YA=*1jt?8pbo*Ztd@T#kxMg@~_lj>rj)?a%MdSW4!f% zc1MPKEC*DBTJ%dcR3qLJsEXytE>puayb%+UIkgZv=j-lVi#l@Tjy>t^R?A23rgN&Lm^2-H6TgP%JNMtyz+T>hu=f3NMEV2&zb1V2vD z-e87L0GP~c;6B6(fiVrbiHu-KgmHntuk+4fIwn~P+q5)^zGBNKGc!uF%|wIv<@?tM zWgVpjRGXX)ZI&r8UZsQo24@Bb-4s!labPxa7T)d^%0W%8H?yW9dIl$RZ3rEh=qhWV zx}3;}GWALWF4a?TNE>QzNX!fCqvY(WR&7ZxQ`y>N%2)zRAg`ytLEObV@L?61EBAT5 z1zLmT`_sk5d63S8Hb?+XRHN`25-K}{b4RxsYD*;D1Jq2?Qg!?Fx-OMMKBwLE+v}bk zCUrL*m1vx=3(@Lg0rrV0bwVUc2rm-G2pU<<#u~1SEL|+ulu%z3se6|%_OgMV*QOh1 z=P2P)sw@TtaCr_J~@=Dhd6KNz3(7jORr2*Mj3UWUWoS^rgS21R^%0oK zE}p%kd6(LFjMGg^s^Ah&D9ZQeH0=0eY)Q%lFIzWTuWCyw5~@08th$(=L=j~=>vbR# zU!NrR4#FQuphQ%SeQ^|ESE#JrfQEaZC?Q|+i~N5)s{e$u?U|Kgb^ke3jQ_Ce|Aw>e z9qpX$jO?uc3#a}+?AjJAE4wX@hb~`G`cHw?Q7aOg`mQ@0Gnl~I2PSV($o)U>|v`$sf|RMmsNMa7fs{~b$(`X$gd(2qr65vG?W9DBx@n& zZIddMeQ=uDajFC38N*SFw<*`LHi^mVaFZ-!32&^>{z@6Nu4o>{UUGsAQE#|LSV&4e zW~AFf&?8C<+H^Sq256EUvPLum;d@1<6EIiJ-m=Wix~Z zxQXYM?SSTp{kB41vI+$xvWyUM#Ph&nOmN={4$RMEJM(U^UfPn{bkS;=uXrG<_gCZ- zK%L;}!Leo;^;0)_c%NBwuyhG&(f+?r)iS!-*}3YfX*d2Tw~5e`0*Ub=UXBt2JS^h0 z*gU2qN#X9hmTA` z-v=M$pKjfrFeWMY3q;<1{=9LHl=Jcj52n;UoZd6B=sl}&0+5FDk)AYL@;`J2Q+LpH 
zd8+z_`EyquVQ-u~aT#Z8=XK{rgRU@bWVkm*Ds#*LX3-NR_?ko!IF+aQjkiufp23|L zw_IzxJz!#E6{2Mi+b?wUQNBXO?+>t~=GHW|uvOr;lht82AXX_)DG2#meiPgdy!(%0 zsc!TezC9wo3*1^L7Ja4|>`}YX*DmT5s9e;k5=Tkc|J&2yu(!&_cUbyadmD=#vT?{( z-V|mKRooSlcd7Ew8T!f4(^VePL^qs)2OuFu5QN?CCc|$4$YLKBLs0Gf5CMH^9*l$* z79_n+W$cT_-%6DSc1;N9#QtR!?rKxmFhxsFkV4J|C>n@qj}2mBdWVR|h$|B;3HLcY z>~kbrYKx4p#;<7=BdOHOe)Wpwp#bX?mCyl5w1~hS#vh1O)MY?CE_v}yrGKy}AJXkR z8G!XICMcans3`K5Dgw!_qok87 z;Q@f4nA6Ic9eWRwFASO)new4&wo_*j%A#3a@u5QIMTbe+@U+<_htzKpRXE^IcnB9+ z{IE_ILWI+f^B89epx4MXCbm{OGhF|B9Vt#0wn00G<)HHL3Bj{N+)^+s@)p5T!Jd_H zGH_!_LPmsMi0FN0)B#La+=-fY; z?$L*go9bK(X+&Lc6!M9`JHP0zQ)?eNLA$v2e8V^?05`ihpWg$_#SvZHVi1XeFXU|o z(^-k}#hvG|7&Z#nt)0hws<*Fg%O3q;jIPeHW76P$cisUINh*HbHQDo0Rq>GZoE?2z z*-o~gyY|Tm6!*`kaFGoJU4yjiygwS3a;It|WmIu~0#kg2`AE6eG(Hq7mgeZUww;XU zYEzVxBj~89p#m{~di@BLEQ$Bkx6`O*)j*`boH>_{&`$k@Q9Y%L(@x|ZHqkTz8RjR9 zaHWy?d+c0<9d78%hS1W33lULfmNzTiFBx6kp>N-G%A*u=GChB6Y4&nEe8qz_0xN*q z03%?#Yh{;bSv*mnY$|VdM8U@^QmA>ltNhjQ6(k*x+47!0J1FJ7#pFEdXyG$CeU&td z8;R9z;|hJ>5Ng$B2~@V85a$!c$Yscz3U4LS>Ne%V@=>U&Pi7)9YXk_oUILhsa{3oB z9w;F>pDHb^#5_;C{xl**zeb!mOSFms4LEBJyyx(uCdXkU-8|j1gMw6&8=hl8;BrzE zw%t^c+-GVyHH@Gt;tkzBBT~}b87#7cp3sCtsbc`6qIpvO5SLB%iw%WsZ>gLz7HuBF zBxg2;qTo{=I0hUsn%PCRFw9uWzfD5Z${1SS#vCBh1l>%(4MxM9(KzS0j%*(C;>0f| z-g=Qn70MtO1jY^pv2b`rO5uOw`6eQqbLDuqOovhX@{z5IRr(g*OB;tWO>!%9{^)0- zW^WZ+6WC>u9CVa_HwS!3z_>9}<0@W2u;l8)B|u^zL7P!R$cYR=_}MkM4KH^m9JF%P zkFpY*6lg9)Tgm_Za*mCBCUwGjAT#c53R3d=$vI6*Z{blvRg(};_Kepz#i*}l&!~DJ zP-q7TwpZ3P$c9R!Yr+;Xn!tdb?{kWeW*p#VJNnn4_KpalPK0e>F!$q|0;f8MFP1AV z;hz=r#Qvf1XPo{r0IW~2Cus8#=?y|cXMvGZ8dn#s~S!dqy@(r*1B`DdqlH}aEsIGm9@kQ7dkF?* zFOPKhV(xs^>3M#mo-|3^Rj)BEG(F^V2HJTgZV ziwepzE)cKTXlnnkxdd!_iN)ad22Y>1d7|PbN z65f*}6u!Yt_Ni)rGC;Wbb2GvO?E}zn^-sOS%2RZK_ZWXKGK~sqM-uo0{dZ?8o2R(} z{$?ui$BN%9tKSSQkDm*6IYl1F_YILdz%&r2IM_G#;SvOZwQaXW*)E+R^k1r?y z9e9&V8f`v8ZD4T{r%iu@^v?z!lo23krI->EOO<_^AG7@MhojUnhQmKL`LIfdM$b61 z&Wq_|{8->&Qk;dV*-&66xU_^!)Hfbfxj6W_w;dHM4xw`bC#$2QzOs~sUhz@;%w!B^ zPSQ&y4B3m7o`w?f(AeR;av*5E`L#`&X1~wk07}_*zYRMSC6FfH;EsLQclzC_b1G3g z*y-uO1ABQH6*#MQe)``&;Wr^~ttUj)KPIWNkjHbJ2(Uuc49na7$sl|Ug$>2Z>vX?R zaA6Z>IU3Vo3lWT?i4aN+v#E;Z!ub!zGS2MXAnY zW^`S4cD1b(cV;yp<*)LGMloxZRW9c)ll#VFlzm>uSXE)qKY>&{qVyBZ{t9?7at`B7 z{;Fc4eymYKf;rmFwDD}}Fd8q}cVZI~k6&zM>W3*!Gj8|Pk-@iSXjKN;<-Mfj&pCya zt_{B`Z2j;&(C6Di#n1Q5Bi2JnXXzvk3I<1Qaq$F3jsE)wM&DDZfvMd*q05UxtU=ok zP77^8;9ib;I99d5Def`ED5wT5J*FQ=iU@OlB$#tsGW~FZU;Yhr2Gp=b>&S=#Pu5Dq zs3~Golp;*`;GqO_L*n}$HOV2=6jMtsS?Pyq9}dA6{HtS#E>)1r!NE%a2>pYXA3)9r~fCfUTa_4VROR$-t-Bk`QEj= z#1wbo!wEF;vt90%q=_MRLDO`OD7T)tZ{A1S;YoOl{Js)bjEO%+9+7O($ODU5M-)>; zenljkenj_p|0K#BX&~s_G`$|N)Bi@$=GeAvamJW=P^c3vs^F9ifb`BCfrJ-PCm8ql z?>;&OS0=J9Fg}3E#nJ3hZj(HK^5nT&>*f+LkrVIbo1)nDsr7XobtutX;ADQWD`6 zvxImc(P!MwJ<`Yp-!|!YL?%U_C;%v-`C=Fo9T;qyc_)q<;w;1b&pxt5_4|21k>LL=8D>}`{K+}hXx)GCg?M1>Dhwv2l^ht%$jSc0pHoIVQC z&$E8d>mM3q=mkR$q$6E`VCH%-A4O6(vS*bIQB%|8LE&tz)mg9S#sxHu}SpU^k59f}SH z#!p+1dsD0jULC>L5xR?xF=B(%A2nb=UwTF56l4hFlirpF1%!+$TjDXu#n6WP5Z%?_nJkMUBmhx2D!1@L zc$;>>m4b^F zTbhlfLo>Bnr=?oOW_6YtM0|^1{pZUy98fhebkwm1vRn92%7b?rrH^AC==@;g-?dmH zPkx~x&^#L32*AWspmV_pt*a%YQEH7;Z1?#ug{KEVj3NhwlYa}+KD&(q}e< z70Z9-^}cd}j9R+@Xk0)c5o(_~g73oLuzEp8Ce%k%cU$s>Mw^^n1b$*=9 zOh3@kN~I7tUB$XmKR0+J3ol?VM)ywl-#9?`AY8a^Trx;KZh$iAHmGjIY{Z`6wgh!B zdQ=>&S|2hJdjVCrkq2r0!L5WJLdx>Dbcky2AP)k9<%a!{W{hg*3|ePR+od8gIdq&# z)GEWwy9@N{-kL#faS8g7a1BOBEa#KrFJWnqT*(rBf^Z1SFeo0VwP-oY4lUAwe{-m) zDQfb)0|&J-rIK{VCeWF9;`*^JQI{V#HU0juZ~@k9E4<^)49Dcm-))@n%*eWD;&ul} z?g8RJ@bPmW6}2ASG_HYD0JH*-RJrJ#xB7#mlYG-{S zOEhzfN{M%GCA6@!)nj4HZ3Ul9+bZzQQU34yItpeb?|KP!aWgfVAc&ZzxxE 
zO!VZ14k+7s>KnZ!XihaGM2L}(i4B3XFopIylT34z$kx8L&;mRB^q>`#CZ==Pj#LVMEbCa*x?Sl3Lt2<}8gk+EO` zFHoM7d<9oxK%PmC7Yq>5>D$7nhc`6lCbE>tD$A> zrY?RA95_Fb>^BQmS1Sp9umt%QIVHdv;gjm)%4eCzXKUKeTdT}HPS^x=gFwI$+LNt@ zCzc61mWWGU7MY4M{Rp6_eWJoA=HGEUS}KqCn^JoK06c%2C?7Y(+sYRi> zLpp$4QJXHNhTdQ529TA@TeI|{FJsDZw+LJT+W>X9V>Moy9a%4)^ z2UrbkaKN+RB~*I)=!^&HXiUwStU}JNUV!?$?%fGMlkf0mjW#}oNnvY$wzSf?oD{k8 zjx#N_?0}>Vh6pc#fqC zJfd$?VZqvsD|Hs_kaKI`~K&B&pGq^wH=KrsF2cf48opJC;VytWLh^`OXej~ zW47;s8G(N>67H+^U7DrXwmLbkeR;#d-ZB(Wm`m@Ssa<)#Q$kc{}kr#*RuV+;c;AD(*ni(O&4zieLm-!z(i#ziIERHl! zk#gs5$Da?hdgI=k>7dDY7E_-T_d>TLk)bZ?VCy*03;d7Xh<|UB!Vjpt9*omqSP4?~ zb*j4mgTjpFdw*|9t@fk#7hjh|>HVA8OI%JCEC3fH^sWD{E=aw)k7 zwhr7VEwna*51W@h(z$v~DeM+4WRx%ML=f%EGE<)DGlFwC3)rMUbq+J=kbqM_#;}tCT z!B2$96AjB3N5$Xw{JxeK$>2zNVfMc~d+D3fM%>nsx{W0Xv|7@_bR4PHh;Qb!PH#gT zsXROMtXGIgW~+_@ex{B8pm1?V%HJk=c+Mn3wYR=%HKn93<#KUbjQ<{lqfBg*CRM6s zjts{;FUm5vKr9_4QO%USk2YoWi2;(h%u{e>3v|}?v@fvrS=srdBvhc8C!6pQ7Q1{M z7At2cw_QsEsiD1{7rMhz(im`=1Qcb{;V{^DEAY+)ypNOL3i=nG1KzP|h)vTHsu>qm zoP_;~woxYYo)N@keR>m!k(Z9_<{f^5ppM~!FV@&{IfoPvyG_w#aI{pW_ zZIS=o)8Jtc4rO&h^+UcbEyyp-*?L$0prYMlz(@U9|Km&PV?2$=x6Jk4xD&?Zfl@on z;mYayzUZA++Xp_Gsy@$Q&#Vc_i9v37?$*zt-Xrt>-5WzE(=UpjPkwk)F?UZT;@-}? z7X+O21Ng%q+G+o`KW?TF`AToh3vd)x37Z=g)(QMi#a2EtQ8J_5UZnCt_-s=PO@=h7 zke)t1Q*ZZHg;FqL@hZaCwq7}7f|9GV+4&jj6p#M#RJrOH`rv6x?a~;(+LX`T1B2W6;my^UZg`ZYb9=oplJo z=1rKNn9|pp7#lcj^AqRC?4xrdC_V_3@d^~nR%~NHQQ%^381o@_S$RW56QDX6!N(K} zWyJ=JbXexaa46uCmKfg2=l`O-K*Y>Z&o-aH5wlC2iLFM=B*+G=Tb3HniWnk$wh=Zf z_$EN~>h@P zJwJe^tYe7^W?`lY>JSHJ*(AG`1?m^~^<*&_ppH3U1{)O!!@B-C-^Ky8LjiNN{Q%)u z&m863D4^~PV2T2D2*px&uz59nonoy{J(z-93qrAhYCXWTj$%D~uS2EW!AzFgYni~p z?q6>Sv(KSY=wN(;&RYD{vHS0dpmN?|B2irk(W;;+WEy_4j*yP^_XSjc6-=PAdzoN0_@!U{jk{{73!w^r^E9IVR< zD!AsY#Ji60BYPU4Z#{vL%e^npSDS^)(u0lyR|9B}<)u1m#WyxcI@LKO4cKX^Z4^8f$< diff --git a/python/lib/py4j-0.8.2.1-src.zip b/python/lib/py4j-0.8.2.1-src.zip new file mode 100644 index 0000000000000000000000000000000000000000..5203b84d9119ea5415114939624411ba7d3daa8a GIT binary patch literal 37562 zcmaI+V~{R9xHOE8ZQHi(HMZ`tZQHhO8*6Oaw!Ox-zh}R7-XG`GK3`Hv>3k#AtH2BfiHX`GLH?61ie-V> ziuG;V0CDeXPq33i?y^aBwK?GwAdx%52f3mbTgnWq9>RSpPK)CXMx1NOGw5R^s$@WO z{a0h&$0gWUh5he2sn!_zskHjPvGDvm=vhs$ucLz6U`Pv5S{Yzd2-Ay(W2UWZSiG(i z%PY&$C+w$lDDZ0Zk06_y@6kW83+W4inEq*lg{0Mdodn(V%>nR+oStwYgIdfv;_>hG zT+1AH)?M@56v1D&$SmHn9q6#>j@ZT9+9;jDM|?~_^w6lHZc^2?#l z8k6M3Pi56CpvF_8cF>%fnM6IY3+GM>Hsjgk0wMoj0-0Ib8QNHSnL7P19J*Gw zwO$rN`pMI0UPj!V&{afE5gJgVhUyJiRUldGF>%^(Yr=Ko?8!UycX#9LAbVBT7DKZY z(4~0%m}X_&=Fbqw5UFBez=ZbU40cS4> z!Un~WD=j&2R-Q>Mdj;pjVCM#-U|kfUI47Uf9-{IpOw^nflJXd0duIVfXpfD#e@`vwA+}lLu%jbur zr|HEUjaTPmpfQ6T3aIJ9YG6=7JF^%qW(hm2j)11B91*Eve3k2zw-|IrTOU%u?XDt8 z_fpYGc9TVOe@sI|=c%~YGxyr_)CnN0I0UAKzE4_XtF>0i^-vD;bsL$II~@A3OW}p~ z_pTZ%j2-12TZq7vDYlFY(Z~?jZ;EH2b+A)CP%Q64K)T7n``#5oxpERv)lzf|Bdy>a zC6b38*gRn)EO)^c`?PF^_GSBL zhlS;avik>Ll3eyG;M~e7KG_Y)?{LE${vP}=twHv+_Rs~x6YA$T`UudE5`$=?pR)PpaFciZ3APz*(0Lq(ss;Ni_a@7GbgVp^L5n>6IYC10%HHw+ zNB8jVZ_<^ZKtMN0|8Kg-`oFrjGITT4H@3I2F*SCvw6}Bq-{h}zX^+c|@O#rIlJ0lc z?iyRtMTj8WB*byKTbeF`+yzV5IilKn;<0%jV^1LEBli1BS~({97Ax!U2;Hi$mIs+a}FWeiH8FLijssH@Kow@EUmi+Puw zUN@OBGn##5!gb-vEf&3l(-Pj*CTLKv`?y-U*|jTgnsde?-!SicAk&Ueq+7e<^rI#r zC}9irL}ARloqMEH2)S)C=!i;=IZ*;q!SKT}AvrMIH1|mwHOzz7S9T#YFZ8X_RCtUD zCYOJ=0>?OON_?|tD5^ZWBv`sL53h_rpGXk6q*6NP0UXt=3i=bs9-Ty)d*Lm^{m=`y zNp1rj1(ownC8=YKnKSkILzi{@vz`f;aT0ik(|SGstkjSfT{KT8c}xeXVNbr6Xvayb z0ic*OPpeOPBDj^KlU^7%t6)&-5%;z!JZ^1l0Be`TT%scet6IfBok8n#MlK;#M@=7v z8syu&=l2f{GW9}W1~HH?K(X>XSd5}*7(1}b2MNSBdXR_q8~GZMNCo_O5tE>pkjW1C 
z7GZVMb)rRDVSr|UN&%;XnnM>6J^C`DA9T;Li)Tli8bbcHW)Qz!d!YrE2797Y9?~AVLjqoAcnyuJ)%KYXUZeL zcB0u3^JHGWDpWeRmt#go5<;fdMk>xT6=5O|<$Pg2x4+^Mh8#c^G<>AoArM_;Z6>Hx z9`*=(i2{yYro~dxkc^nd<`!WJ7nI$W1qX(XE??p^%)`<__z>UK(EN8(QT;#*sjjff=O-(Y5aV-MF6WMg^fDZLU)TE%6JM)qxEskgPa{~{C6xi zsFPn9NOX@Tw!&};RG2()f7aDgFsQXhD!2OrmLf6&p+->yBPhQ`=%3w3K?*>4(LHM4 zy5<+JHztKaZ;;GF7>#rZ8_AZ2?6Uc58(%lj5uFXCh+hEm+ns9|C{Y#Y^bnSJf(P7m ztjMhuaZ=;*a`pvDC>vOn!JM~Gd78`dwVsKf@yN-v$#5DuTlNG?uX1USqFiLqps zTfX)V`%RkkTteP$vap#T3Rabs|!(BDnv zS9Io}q9x>B#C6)(g{hWWO>Zsg+g-I?gK^QOnMhByQN|Y1x&mu z^lGE5yK~H%-r7N4Nh!vW2u)@u9G8=lFHu>~Jn2#cq6jFfa2P(AwHO8J4sG%Qmhm83 zs@ejdph4{{nPk1O2}~Bg_{yyNy%68F`N^{O&;M zJzzX2K_T9w;?|>^#x+PPpjIHVYFGWU)&Q^!vTufsdW+f*v8EDJ764_thoWsC4y0xyF1c>G5o`)%XdCe%x*XMYrFW&QxOkad_G9 zCw0CFzEUrMAQ8dfxK^-xIHvKRq2ebkGPBOVukgyigQ-{w-?c(WG$ef=V6Ay68>*EZ z6FvE11FH632F7oRT2l>)krI?+5<`$|EMdJaWYb)w@_%1j=s_KSdN2x0RRe|Km4QBE zK+h&YBGqb|H>hcu2a^LoPOz8HSH#=l>Ib2z^miDcG<< zmuZ<2vm^6XPy%4w*|C&$L_W65b0L#X@Ox6#@!)5Gc=pwf~XQ@P#=f+5- zDN)AJd_Aqecvuc20yhCL}k&SIjm`$kB3`w*kyOPS^%^gFzt>J5a1f zB$bOel}buq7Mn@1{0O6|f1)EM72NSUS*ecqo6&j#0X=`4svb8a*eMn@sUQ~%@&;gz zLpwrR)0!=&h23B31yWQf^P+;kyL;l|G6N0{UzXRC)9LDh);2(AxvTIhvzuIsyTAQ0a^0cC!H;9ztdYLME6 zDu6Vmrn2XaxE$L3Q6Tib;t5bHb8(!!i*svPeI}2LI~iHu+EXz7=1>j->8_Yiabn3Z z09p-da3ru4AXa($=u80XXiUqUtVYeLS%A4%_vr+pD{y?X!5E*yrm}N5U0P{ePL5i6 z$D5W}c0|#EKt`0p!oK+=oLbVo%#|PiIRq`Jz0}u6-FYqs>2(LUsl(Rm%g8xLUJ~s{ z88NV}v}A8k)0Vfj!k;^(-jTV^q7ag7E2RJC5S?4kCIS}Uj_Tm4&H}~)k{xKoZ#h4*T5baw z8P9R-Vi8&fxQOnPlEn4st!=?NV#ujYcl_(thIw+B7jS&4n(XG9&$56M&!g9o5lEdRT#B&Fhi}OHGk(-cpOI>%d{s^U*h;voDt$ ze`AYg!dv2E_&n*Cw1@i*19zrH$X39>A0dJitGV*E!7C#skR#jWn5V_Ds8jOW>0Myx z{!6gc9yT%u;aWptU1_>f)Uwv}<{89p+k`K5K-gkyYwMj}{Fr>S(5^G7oXySk>uC3L zwEZPKJM#;(X7As6!3?Lbg0*knq>3+?N`@^W0kcD=y=z50W1--POpEATP}asjCCnzD z$CZr@?ARKh^C!qv+ObZB(A@A?gpiaN3k(pofATI(SzEpfCe<3Jv!Z33Us zBHh7gn;!b4yFdR-hSc1jbR&1IM1K}dZ=FfHS(Kj4D(yKn!V|1qS@%QMAq1?Le6w0M z?$&t9E3Uk{SASI{_fS6=bMK?AaplA?tkfd~TTKif@GYiEW*H zHIGN_RL<;!PbhO#_9V7QR=)f?6rO%XvM~D>bSp<*xo*URIkXp!S^T*chbUN_W?qrs zV3_g`y0@!iAG-^~Wtb%YU&OkE$#y^WKGe?5+dBXb35+iYDChWSZ*J#Fw@?T93`;f7kDKgp4J zLb8irq>E=N`_Er*EJ9rjiur#F^S;>!>14vl={0d1X$@Sxgy~*AI77h^i1U%RMbgS2 z0Bc{OYZnwp=xCt7n4$@NmZ(E8T zObx{;(v$Kwb{!{T?)U7VL?HNpMJ9pIJ(YOBVqxW@hGd%{ z6Mm9;UELjBU0r~#?lTF`+aXaF+7ewKZ;YL|J;D$2-qrQ(Z9e+aijJr&vZ*Kiq>HkI zf~GGadYfRRl94me)COya5$4)8K6bL7u< zbg$3*L;lMP!V>yZ6<>@V=(;yEX0)qI(rzT*3=Lij|AkM|wAynOniPs@v?(xD%>$gc zCfOt0n3LXf(NinUziKTq9iw<_`KWiiFibC%$|{xnN9n5x!pFlK1g}~@40LtvzvUyM zfQrj!6h#_9p)`@?cXb9|L{>GRddpN(RH%`k znV7mga2d@H*J;($;Qs2s7-UXkmZ)Lk3Rld1i5#1%@(!O*9bGN^Jp2_AMsy_aB+6z;>tWN)ZdiACp z;*{XDmzY?Xm{_^{z|nKp^L2Wc!AsZ&0BaO3D6C9xk5y&-TLwDn*O^{@p8% zdr^1gxb2&%>Wr??=3gkOAnejod+IyO`nM7>-R=l?uPP)k|uT<{R}k< zIN8sg=L@*(_Uh{Fi;P>42UpyGcx~bwJ4wdL_i^*{;}#*Stk4FAo%c7-eVw|@>Z54tq$n?l`uoGFjkXe%nqsi?c_v1P&4K8TT_W)k*BrYU~T!f zTxS1wNn-`Uho(y4=jK<{q~nKqMp_5ygR}mE`Pg{~ zsiX5?uleLcfF8W1yG1GZ%mk3ev;|3SQ(kY}!QN&7>rG{&WR&K6CH8q;i;9*(l;>lB z1GZqpvO%Y8Fd|_942bd9?n)5zc^jb+Cqktd@a&wVe6=ziYj1{NOq5B{Fs<*~GyYrb ze{hU8)rr&nYY^s?6$usrIbQ*3FI0dWmbRx1+P^=#=YVUBfnmts)K6uKknT)E5H5Q` zHjIX%gaO%*6#~JcDHtVhPIT$0l+cZ)78d@e7LQz@p&JtxtwVLg0#I36?7{K+HBZND zf%n_1szV@RV>MM7u~Yyo`ad@~?V?-i8}IIlS;!Iwjsp1Z`;6Y?)BZ7YFnm1gou0(;=I& zh|`jzjS6X*V4VjE?LMRze)|XmrFn!&dR{wYKHO9@K-E)rDah55a&Ze`9@mE0>r*j- zP1zp)-ND>*1{Xf~p2dYzaU5VYA*&4B@)mzQE`IFM7r%rkYxygL*@(RgX;dLgbXQJG zE~=PYV#dw57IO9j$!^2}eIC_l+e3(lC)5C{vC(FFPxKIM+KTpxkX1!BPCKk?N~w`{ zt1xnX!H_14a<02TeqK>k{-QyPEM~{Tl*5EnQcp+;7bhqj%S`^}{ zyBVR_9pow~;Rs6R!9n*#;+48zX4jfR3;a>&fkxM<0-e-fzOar)D4{HeK21h8+@MoK zs{0Lp&OH#K) 
zr=Am=Rd0>+)j{%8iUsA4Mp2aV?T!_piEb|+Zm;0OoTKl@N=EHdjAZ9vE$}5op2Ldo09R^VQ_JS{Z(2qGzD-O>n{wrxsM zto~ey_GM+Zs_Il4$0^B|)S<6O8rl)GWBBS7A?D4DJz+WxnpX$(Z5#kcFEP_ zYFlnjQiN2xInc{QMzC3b2y8=ISy7ePKxqFw(^H!@(4kpHn^;eUWht<-2?Dsv<~bGN zD{{2xnHpb2r+|Udo-^c1!>sx{iy0HSmU*%l-_V<5RYK7j$PjE#Di{82>o}MLKZnHO z7hzG`;D2{ZpVmY!qMT2F{xz;Sj;RaU`$Lc%rgigCrKeV@&@eD8@aZs%f^-lV+K9%s zd0@fE8FWtBl>eA(;ONc)3OZR#aa4eJOq*6_)T{w$;s2Qx;v99+zj26)lsSa zcJBYLz}NH#+kAuH9BaBzeYrBiK)tAi6{UwVz;EZDiQ~hOpLzfKcRX6EA zC5gIH@&@gJM4X7CETL}Z_sxN7jA}ReTSXv_S;mkxH45g-r#3xTSlFqfEnNw=VD57Q zrs%w&*m6NAuenomqh3(Pf5My(VWl>I#gqmn4`b@sS@GojaWNnm^A{fKtbWiqUBJ8G zLyz!dZErUwsXCePZyqyFKi<^VlRVkOHegZo3ioX$%&+}5$RMTSj^_c{hSPc7p)B|B zHWBV=s!$bdf*GoBE4DgpLQ+y?f0JRI5+52JINjMA}G?R zxVlH4yO)48gPuPp9|3teWwV&~ok4PdFJm}zpxjEv_?6~>d2&Al%LX{xzo;B|vGVK0 z7KJeTY^Tdvp&HH24uptW5VE!0DRf35gi<11zVZ{(&GPs8@LW)PtqYM08K{ z6~ZaeMx}oq#>mILHbNB3i{*+16=}J1OArIW-L zj$X1=Gpsa^0byh@<>vh!%1>$aX4T3#vE*tzGQ!9%Rtiqec)h~ukW|fq3 zJM)Bib=|Ydm3a@`sEt8eE)R;&^wDz0=z%tqpLhL_#pj9WK0*kr{AL5qPLSh%x|U0A zTX5ql{Ry2Y?sth!$u=qMxbL9lB3_9Z#Dyw!sy@Q85z0$7*b)4MZjU9du&S^cd(93c zX4JBvag6XHeG3WLDWj1&Jz2}rNn|!1Sq=(59DiNj*=ynLZx?uTLc;}r!awF8g(FH` znJT6UQ!yN`k#!XU_M5|tYS42;$?(Yg0%ys7odlJv}U4kjrWywfRT;g|W#!D%7nvohS46FN~S#2g=?Tm4SAZotOBG?mh=H^0yADt5qcGy%3)k;vhBoTY)n@5 zQ|o1wdBlbP3Iiobdzkn`Hm{ec_v3ncr-YJ)f6m>z%~Nmf=5cBwlOfC0ddwr2U?|BA zE%z|uRnM16$96<8SKOSb38*eXZjXi`*nbun1h1Uziwd?LOyU*E+70zkO1yz0@F#Ihdx@viHwH>-!h4i`rL^SR4c8)oK7 zfRqfEOGT*c^kQnsO;qZF#36)fS7?&4$|o_cQVY+NP<#BrdU7@n*!Y$=0J}SPB=hZ+ zacfQNwV!Iy0%2iqwp(PcS$$^G1!ZnQinnVM0B7vKa6vLesWt~U(482eGRaP^+9Ec@ zY{};4Y5JjVfj>z?fT;!~ZEG3t<5FM(WgB|R3HL0H{W0I%Go>baU*4)NWtEjKJYz`K z6;yk>Tc|NJCvTMCKFEzsafD)b@u{<4#)kIc?>Q%sxreK(qv=1u=a>_4lTqo-mciEI zRiMsl1)t4){d=t#PqW&)jwX#uyN;%AM6+_Z>0LWd=m@L{qZw=Gz2YBwM{^8g-ErN5 ziNjiocqCp0nQrX?8eoD#u8?i$b^uClQ4oR>7Aq+Lpe&~fDX+{VQBsKT0NWVQzr5cP zP>+`N4H@^XBY6<`qQ&Z1K~3o~ErrQ)n9g6d&z%fWY0mjB!yjkJFElACgzN6<6Z8+L zN!%wnKv{lSz_t*N)>;t;bpaq+5eq4~@1Y$|#vU5hk&f1WxBQ!5!Jl=n$O1zb@Z^>U zcs2UqOLZizn5$!|V#U%You9Tbqt(S;1B%TIt6pdYeAN!=!1+!E*;0S03{myM-kWOmR6?7FaA%ld%r`91@pi!X{?CH#>tLLp>v~8a zq`Ws5z|Z?O7B`01in3uQqC(Em6ok0|t=lL*_biwplG;5Ymjm2>sI`Z;;QUX^N{jvR z7sgiSJ(|7Qaf)<`_~chW`prKw=xW!5%Z=B){on?qrNL5AbxLTbWL@%Z(|g%5vOX=_ z75d=&V4xb&&b19w;-;XZCt#iDH;*JX6?!{6)9vAoe(+wJ&WA#cz75;1)i5C?Y8*{Rd)_RL;kNKL zv76>QDbHTdCVoywS`)Wjr%VGHT)*z5 zer`Q*UTPzDfBwq^bPdmkjh*{Z@Y+$f{Xz(^eI(Sr@eq;ZR>}Qm2us`$rIOkoBSZj< z*WXQzvpf=p?C>IJA*1T(CrZn_as73_0y+Qoj0aU$l@?5qvI#WY;yU@t<>(h0D>Jt` zM_AsKKS$QWxw zSa!}X&q|~1k>JOO6llX5tZ!Rer0)pV+Rqv2zTx%B|B1+z552134!!p}7LFxg>drK% z7mFMRvmchh#}qd*D~m^rc#7dQx9Kjh=B_BK8bsQ0eBa_QkOFm_tZ#05u%)+-!iK9b z+Q&>5QQR?c)%hksU%C|4$>ZjH&*PcxnG3Xg?zPzkHpUE(Dn4b1VluMKR@>r32f)3< zfOfts+csn0Hhx3Y^mb--ce04(Q(DOHNu*O@c0f0Phkh(@65VUTv^tT4+7_;uOupf5 zrh&SO7W!Dn5a;%o>97YEY~|kPkn5~jos*bMc}+vQA(14ROCd*gbS}r>Eg=EP8X`tz z{Rlrz>CX@eM7S#I-q&?w|CgH3?&_(31aQ)H4w*VASGO4!w8SGFIqHT+i>E0u$dMIWRV5xcpk zU$6odYYa&5GcoZyJm{8|Z$mnKCD%(X~e-{pmm`H|1gN*tU#CASX2erHdtamOFcxCCa9^U)vTai!s={2wUP7I_~w1?zcmb!8?LDTduz7KLPF1($z`sy-=eSZnR$YE{tL4 zAWiJ&dGVb)7s^Jnxv`f@d59~)AdfF<@Z*o03^R}-wCQLZlz~RIw5>X(58@9-#SQ;J z(6B9{U`3XdN6xHgpt3P*F1+B97hmi$B=wjBrnHq)lcUvA5$_a$`?XTWBzJTJm-V)! 
zCA4qt>-bB5luv+pBuPk_{4KjDQHB%-v%TK?C1nVIYU#IjYKhH=$Uk0t)}P_IZB z8MBZ7TL--mkYa8)i8r0u(D&!BM8z-7^q^gEcChJH?*R^-#Yxwet%3W4LI^Wl1g?%; zSdiFt%MxNMwL;9*K#`lT@1cKS2j{yWXr14W8(eq6x=xOcsJcCJ+Rk++sTN{p`Li#5l3;i}F zc46h~<+TMv-|6hS%!N^q>w#ZvnB+XoH_rsfNoH~EwaV5?RQ0FyfJ0c9^KDN{^_L90 zN5XJWc^w!d98csG1OLQ_e+bAdhswL%6P+$NeViz~+*R55OlJv+; z3c)L?z=o8&5t)EU!t0Xu{Cerc*Qa^f-}&d8o2S+m^}{x0u+i6;X)D@SjlDk$%Gf;V zP(VoV=jSZja6Zk@8F-_GW7hO_YB$t|l7K$Dx>ZUXFfCHo;V|pD9w-@OZJjRuAEKzIqQ(r@VeIPq8gMv*Ik)Wh7 z(#~lZT(}g;h230hpb*hEep;Lz_XPbE`zg_Od8Tf~GVc#+A;+jBd_)~__?6IQ;Z5uU zhzg6AwfEt9(fy`_*+_32yH@z)4pu+m0AYQ(DPo6S{pV5dVw!?lY2{Np{^1>FM)Krkcr3 zBL92xtAA^UA?TKD(xJo~!o`tMEStA)^+P!}u{KiFyy9ueLGO*gDFBS)Y!lg(`*HpW zjj~SG-WKESv)cEl;taE-TZ1m-NGp_;Lffs?h2EM)nx~9NhnxRX3J;Ak;(BDaL~;sd zeYPvle?qV3RFJogKu)s7F*2_v379KM2EBJEbq5Me#H686a&kBfhJrZ_b?lNh=gJ{A zRnWX=9x0G5*t5_z(uyglZ0Du$8nCD-fjM|Vz#ni-l zkge!1M6<1|Fg+%EMi;i(Y9Oa=a>Vp)> zv`e1JJ4k`BoLm+`M971QuvX{<4W>(47QCZSKRdOphsGBWq8j&u8F?Ts`nq-&G&!9&;Umnhp6 za@006ELcU-WocQ&P%APjc2nh-KSF4{-h4}ta zhg^5w7!p@SX2j6wiuxSHARQc=2zHAVbM*u;f%LTl~x%juHMmzvql%2 z>bf)tYj&ZMuA|IwBTIhsH4zlcYeN%BA(e`*mv`EvmDNxxl{b`Rv9jh~zF}b&A6a6b zxminRAYiaXIRW&F1a>YiS7SxSE+sYyO>7K8MT6DMWWrgsAAsezm?6!6!}BANESJ$* zyC1W5S%neZ8}r08iE#+0I7`ESn-zKIgIf=g6QY28U4zxSRi+KF*V#qb_If#?(~diY z?i)^Zh9*(ke#XbdwT2C){LOXdIj;^Q{*4R!ID?RPiRZ;v-US-%S$6R9^JE2i|Nh#! zvtsMsB$}}c3aY?0Y0XLaoSpc{dS^{FZ)+zfkyK|o|HOrh-})~_3N+Lk59IJ_#s-0C zeMed;F90pIHg-9k*0Kd=U-XH^FQu9G1O*BLKZ4M8%P-8}@e@f?HB)ci?uIT~L7Zcx z^zlZrowk)VK8(!O&qHZhva#fX#qxiLAB(4zoTYgGR+R3 zm#9L9wJVSK!&r3G(a$$ERiTjz^p?0tyOb-A_s2C3X}^6g2c(&+>Z%aq0G@}Xo7(5n z0~tutphlHMWS)qcopvHSW$#+56e=wC-#8kQXA4c}hNILLF{HQrai`?Hor%2Xdqt4i zx{_(Y_i~bD?Q!*+q8_~4S}|RPS*1Uz({yua8Eukw>1Nk{$u^58LFORZ#j=a6GPd2} zJODmMG*TOQq^anNqs?Jf(@_`|Br7hrUF4(-99s0526F~>wVLk}n;+wyf=p3@ZANHz zyzo{d8@|adMSV;Id!*#?blaFzOC@y~XsT?d|N68z?3yX65}xBvNh<0$P*(|PaI+ZD zR?gunyDf3PKraqKY#4OvjvLG(7Zkt{}nRwQDc_tPdH2E4udK{kjPBAhq8W|$)< zH>jT~Y@QDKiB%h`nh!I$TYj;RR!gm?7ATJ?`C_cB9lIf-1Y02%)zK(U!%}#``$+Hp z<-1N6tNz3dT_xI9s?5}57n>&?I*R1FGut~l>t(I>PHTz=&%md852IjA={XKZ;CYm4 zpoMJ9Zr{fpfdHg>(oP*I&R>6y$kE!XCtuxtYt&b9L}6a}@~JLj z)Ztzy>Al*=mySeLrO{`Fw)KV}Ei2*zZS}#Vnjal8TtGV{ZbJOgg&XYDzs{MjDrJ%u z8}VAA9R3trLk)ImKFsnZYX=YwO>ZZyb+}L9hT3WN^iPpw_kBLIxJF(oT0c6;wWsxH z*B`T^E0xRB>a`KNEUu_Y8d*+8McnPvoJb~d!Lf#S5bb0-xL@z@v z%u(Ur#K{>;VqH8D4pAH3ulfgSrb^|#_}t}Z7a_6rW+woG=xXv}FjCf(O2d$$QkP`W zCA^gNsWIZ@!Ei069mTVdMS4|#L@R|JLhWf?FiN$%vY$-By{Q|LCjK32t2z?iGX)`Q zMu;gXum5Z3%}E1CrPx}$ zYC$igCetmr0*{>cVLq1Te2t-z<(WoRMmTv^K?Z}xBtGHvHbGWhIOTv#f;%^pa-3rH5!K_ix2W8 z^gU_L(Pgipm1$Hw9%AeigEk&w+D8@rDFllB@@Nf9=l`1ecy*nWuV}WgKmWk%hR4G{ zH!QQw49}9^9M2Ql@W$&36`*x-t&_gt2|?>q{8Ez7;nPhDFKw9?u);s>kRkzi+t!0z z;QKaq2CmG+7_Kt`vh7?oZjj0oXc(x2m>2%Ju$cBqG!lcxK!V|vELZ!? 
z6azw+FvTpn+5Sd`tz-&6UEf`wAkd%-sH$r1w&|#IAQ~(=aBl7MPb&NwNirdXDwx+A zng182RXd3-&?hcT3Q6HFE%f~5gF4_RHqZ58}xGUC{3fSeF)x8Hx2pcx7k;d7{TEvr@Y$7 zsV;S4qB}`=PQM5XFle)f`10!A{d}(9j*Iz!V)Ff{!zEcVA}iyU_5|6lG_6OS&XijH zunclSDklpPW-$1vym%=h>a1Eh8q~(;_sa(FdRuY6W5ori;@RNk6x>8h(zRJwh6}Ob ziZr9aY(+UEVydDk?}mO-P(YsBO>cZ4o=2o;)EElQ~y508~8*FqnT z2ib4Qk2i}B1SnH*Mr(oSx zL8lWVs`E;%Jj$pPKs?xj(#mTQx}erMyqI*{WGVO9xh1r4(6JBWswK`ZC}&-{L)+aR zBVmi*982J50Z%hFz*|i^?a5c8DxAE@k)d{5yCg7ZcdhOjA;fiJ%`x-Ia^WVd#$3ObWo+4e z^Yah2lVx(jcxm%m6w6*M@#>RbXG+mD{PB5T78}woARsB1Kc(5?4rF<1TI{mXhYgCY zPMnctyUTQ~SYm!bKwELrt+JD#H?<&#R(n?o6mp{SdA1(%{k=NRmWzoy z74UMO1KOl(@=HE`s^Zm7G)v#o4vyl(Lan_kGrW|H9$+?`24Q&*N=z^R?1T<*X`v>;K?y>UgY4vTb3M5kzjp9k2Ee3Xp6kIB-7MNFrYe zUK86cACb#88bYG1hFE`YfZ+n#@&fG%g~E|HHc0apV>pOl){Np~#~ud%G;j}%P^^o$ zH}zP3@i%D**iV%-r#he75L;CQvW%!)zWn#4Cd@UrPB&A@vu%?me^~Xm$CFb@0F829 z-`aj3$sM|RAQNnTk>vipmhoo^bza#h5N1{w z8d)nbbtYO#S+jtuxsuE2tpqM=|Bn2l=)(e5zNJoMm?i>mzh{k#D8BS|0?0m#jL&|k zqFlJ-z$70%GhH?vCvGk;GN6LvzG#NVsQ%cu_j3J$qt> zl90SoMC#0utlr7nD%H%Mo`~$_tilrVtTcB%%q*;27Tbr-$G2g18lCEaTM}zH(fkuf z@R{@dMiQx!>sI>Y$^YCrM(oH9OiW2z&9T^Wy2;eOX?mMxq1bHW;3MUCv~ z#E!S4lS^qnbZK2cqr0h<-eI`9>u5JVp3|S9>Oxs zkr&6dwiK%~M|p!&3Kp%gQ{eAp(|uR#6qfPfQ?e z;kXkJSxLw<%jOfLI7%$6`^ft ze^R6xstgY@9A+AD2@^bnW$9P4&Zb|S^hH7D$z_>)W$j`@gJPN6#<0kD*U_r!~hl42x{dkLwRf~b~WD?DN zQoQiEBRW%qaV3E%<@k^;0;t9vNF0l9Bx&j&XY3)X2C4N#nwJ%KzZXs9b9H`Zamdd? z5~JJ(Jv5YkmLzK-<}H&-l|68pnK7z;<7vYYi?>PF(N>9xs&JDmV+n7p(7p;8w9aTA z#vXEl3{h{m23SZ+J!Yia0?tR5p)Zr8>SNp z2r0t?QeWQ5`URLJ{`Uo~PfxHvAV4-oHskg(`2#`#l4B0^$AuePH?zN7KS?$UKuP%1 zA!Y$X6|Jtd#nrdWq8(*l={H`{6!_X8;%(|gj^zL&;Wv3H$bHLOWsYb7Z_O+DhJSs6 z`-0AG_?6#CN?p&3? zf7~ojb8CNeS`o*s+Syi1y;iufr+eJB3hv?C7`d!XVzkFqF?(oy=9AJ{M zlS-D`^jmC2a9^RPZ?$+PN!)hp0vMFuNb}k9XJ=X5<7}+h*j3?|+rZv}fv&E#jC6nN zbzcS!qM`gclZg@va<$uDubeN|jqsls>vXZB*KR8Py|$7yFH`>mVwu^Vv}3J&W&xYM z1B_}g=oa!xoqtsirM?F~z(3WpHEv8&<`;;(^Zaq+94Y7J4<1aZeK56aV$pM2?gStW z=OaB~w&;K045sd&>+)3j1H*e)7GZCkJboExYv*<6MT4#|Wn{QJN-A^20A|r0CHR^| z5jdHv`GvPeK%T*!8@E(ryVY-EV-=!h58Een^QUZ?ir*h#QO&J!a(*-4Z9A*ou3xM| zo>CC;jlYE|7wno4&WZigD%{nkpnj5;oFIjq4Nx=?(;ged z!t@Rij}ccUSQ74IYRKnMw!{`0VU=IgDn?SNhyCgm%R>RyDJr2IkZ1vcJ&ZpPsqpW~ z)0pJ>7nT10f_zAq??eFBmzbb*7NMfZTdD{oyN;4hj+EP6aGS6&=t9=C=a(AzGw_jK zy}cCzdaGRV5Lg$;{Shqcel;M8n@C$Cn8{S^M9{Db@7V}Z`%Y{MIx9Fua7IUW1dq<+ zah5;QPMaR_a7vjE1qS+(AIUkIBq@dPG>B3%kXDLxUyzww5}|hDxw0u#hLJKsnt}$= zYOJ}|YWFzkN#eJ;zrq6mK@q2wGduP!BwrXbGcx5v<4lLn0+dCQy5d8*%!>|_wBbpU z%WqP@2~^>LJK;fGWbuPqSqKqMJI*7VNq`CABbuQjANS=f5*-z@tT2Tur| z?c$b#VUf28mJ0T)gcE`5iz1f-?@^wW5mQvsX9AFtChEvHbZEFlkA^QW*Zfm(^Y4ZN z_3AFc@bCyh`BNk+M3C?+b6p{_YVn+0T$yz?;y!26fT0910lwivr@=9MKplL6JGp}F zO#yh!Fd|W8D@JGj!E}#4WZYC|T1dm{f+LVm{9Sp4cO6=L$O+m-HD~L_NddUoMS1)l zU@ng6;uZr)416JP)0oamj4$pyk43Oiz;10k=94|Wt(*4f`=fNVj_nf$_d9bAfJjpD zYp%(jm#T^ftY_@#TgrB_`CTtd> zT2uH?tXP^OU)pvuo-2(}PL7}>riKc{{ONVWP_iW6S6@yenw9;L{&MDAIzroZ=SFpu zE>7E#v)Dva1Z0>WFv1l^=I^ny5q7wt)9XTu4=zMRnOWYfbU$QtwFkbvQz?&9$jS8l zu_f6{ZSds}(g>^oZvBjaZLSrao~7|by|SslvLo{Uyds5~r@P8u4P8Of@t7^``m=*l z-djw}p^g+hlhap9qqvb+-8L-K_YR^~UKT@T>j`l_Q4C*(ys7-EKw8 z;*7>Q!*yiykQXO@G4a-mG%8mH!5}bp$d84?D^v=e6*GP#iw&@EXn}CRbqKA%JP}%Cd6n3KA^y#_>Zp$g({iNb zGf2G_?@>la4b1%H*`pfrjxDs9MYE?O*6B6oZ7<*O+8=`AJu69`4slDHd(Rtv->3)@ z;AkH-9)ShK+cpO7JV2)dj|1ig1EpV9GtuKza*hqe%!=7X<ob2)f7Gj1< z3W;j6-Qg?`r92L#tObT9QR=*2n9EDeBng$`HREo}M3`cTX!~{4)Q#CSEkDh|SMdrs zH~EU77E0bGRTZ=*7z&mSErvJ+u#vas$nloDgjp2zgp}!k&#@grtX=HxdAp$qu zpyJh7nAn(RkZb5-ANP;?&syraRJ7zHb20&;Z#jZ-+kf_-`qxli5nU@6N8rPs^TK`I z=Y7g_RYwVcnauyF>t-<4?7TZ-@ehWRl69m|&!_?q;CG@UJ2)r4ea9ZIg5}-DALH}t 
zXMcGAIev_y#|@ACEs8}2Wf>QU*JL!gchFP}Hnqs&b{R7DgYA*e7vMeL9mm)E`9R!o zT7cfLdi;$)`3VeVYgqyBNfHWQ?v0*r6za zG^r_w?@k?)iQ2(VPx~I&OG~K0Sv7N0F69$%Lf%?Wh^o9Msj`qqvz!R9LRAb) zTYbqOeDwwOMapY*KTvRC<78$QNkY=cH@Qv%a4=l-T&V-mC2rU^Y74}Y06w}9B3czp ze7?e|XmkgQ9IJ{_9m&k-y6o&~nqEx3fG)8?Onc2MNR10BsdNIWp~?aGPbi!5lV9? z6sS%r4`c~BB$0CyOlu9r~d~6AliTb z;{O*15bwYLhM|YEiJp_QqlK-R-aohVFSgAg%9FO63=%U9-RZAa=;y0gND3)*%khTWcg zUYE=QrZR*@Q&bf+l9UexEU6$o9EDVZ>oq}p>9x+X!Lc2z$Dm-e5*m8f1-MrNw*GgB z7qOSbr?#6*02-P?WQ-L;?Lai+DD5IReV?;ikkwD+J(1q1czG14k`z+oU65RE_q+(B zqXMA|Hq?^Sg>Zoc3VYdYHITiDCfur1=&l1qy`JE3rH`Qm!_pi@E|^@(>x}Dl4@^|U z2fMVvi+DU}l{PmZy~ZGxqq@)=Kp(9lrVUX(2~;-CbHdXoIf^UR&KG*tqXfet=aB%<1G99Q5iRFps*=%q={afl?`# z@s5Yw34ElTOWnCc#QlJ?vzfenu=BMQkiOnSzrB4|jIuN0KSt97!-n)ip+PeyHu!5x zab|)W$rVkR4mNg}1MLXZ{?v?z5cg6So+F^&kGBuDmjzq zk%JIPl4 zeR2NgF1J%7?$4mjP(3a|?rhQoXln^$6|#V0RJgQEkm$Ls`%)jQnIIC@)Z*vnuTFlt zQpl)@MXZh`m(jQD>JdnnJM18aAj)gkoCTdEuZ2z%Whm;JTCHM(@uk6fl8W0{w(qe+ z(RG(_=yg%{tA6(?D&`Ca`YpkTI*qx{HJUZj_tP6tqxn8p04-o6%0Ju0pO?~UZ~}pqj?uOsOYnRJFMoxZ@EoitE%Bm#)G`+w6PT=Oj!7BDN#H$9X&ppeBySyfl zE7O2V!+&O321j{77R|MxE)|xr5WN5D~!+1Yd5$0&`> zUpSIx_*mYkSjpgp^rVcvJrV_-}M}5Yx@qUzX6OJ|e_t$Ge#^mC5H_sry z<&iRfkfWO=f@{-&RlqF_A1UT>9Imp|HBQF{s34fd+8|N?cWox z5eT)s$IIjPeB>|1ci))!Ec&;5-hiVf?wf$fviRw4c$!{;Yy)O|ExI@I-ZKhsRA75n zI9{_HS2?Cb#V?10z7q11s((9Zp?a{%cq&t^gEKWmhkk(n$gAPwg9P+1xDN#JKgkRC zzsu{t@O=i3js_n8PG2!9o3a6ra(es2!(+ zJ%Qo&W24uwh|t6b2c}ZOg#^#3RLTJ)B@oE!*I0wd< z2a_ua5buGCUCwe^wZxNvpz@Ibfmu)YI+rkcPm5w>y|1etA{j1UlHx zN>AHx4ShC<-<`(KjXORWnOZ{0o_oIM3MAg?_bVfaopiZ0@kk+QD$}rLC#0pJGiAny zMacEYb-h-#xS%pSh7Ha4w{a7i$S!QOMtV0x&@wfl9j=IAk5Kq^*&#Hr$rG{CfBsj^vU z((Z{v9;z46%&jvHh}3zl%OWnR_Hr3IbYuw!m5wpIS%x#555iIf5d^V#bvRVxEw!w8 z>PJ_mDpe7HK#`|VWiHRIeHtnrfh%)Bpy)}Ar>}+4tQu|6180hcgB9*He1DRHHFiVz zK&L39UIIW1BIwbpG;*CI#3AgeTh=b&vgZN2W`pX3FQVD4JL?E_-FWi}axMaRIm7jc zWs(y4r5?>#IZsw_KYa`d$#woiqK%h^(4T7RK%?B#doj&-dVca){vbQuaCr6pY9fVv z%s!?-ov&v84QZZ!?t|R%=BUTSJbtSFMd-pg8AnM3PnGP0Z>$n{EeWz?U2Qt-ceLWd z2{OIPvmhp_DZ$0MFfi(ZTDpghzu)+xy}1FQ>JBMJv!rI@}geIJv`Oc=CtAjh7 z^o;(Gv;-DW+W`Jb>k0fnNsHiLNXy8;+S<^-$m-vz>smug4x1Cj_ivTyEZ*2^P^bEs)j^Vez{_1WgLI)NGKN_o_?UxfeKe>8 zj&M*sbcCU(&JE&dX(Ggxm`!jg;;WHFc5p$0Jy<=x1x1@wL3o45h;M2Y%R;UKkBkKp zod>KvZ;)>RI+LRWa3T`aRU)OU9rJ*^JQ?o89$PAZF^NJbxpr53y$)Wmx+x7%BPXM) z-5sv$iGH6I(h? 
zlR~iBP(+Q<`MuYQISUVyP(pB&Kky#GlR;8AH&N9}Cs?4F1=;3skDZP!l}eUP)B6VO zej$n8W0{#!kd%q&3&lGVgivmWy5bC5ttR!-t8d-+B7(N4W5PkYe~2pZnlG4t)Sf}< zCiDuJ(^y}C*`5ex=4kvLgR|l`W+tt(gG0cJ?EL5=kZN?Pd zP)@gI0XJW`OPhSq&o9VjI{0FUe*hxlV0hvxFdE*TLfe=?t74DuhZdJjg+Dd0=h-?e z3OB#SX2d1S@VJCp%XY;qJ=!y~wcby&A%*~#r%qq7L%vWJUN&{@N*`T2H>ceo`Mq92 zs&`~#3y`t_#WRQqq(${>CQW_b*lHbxuAFYFt-udIDVZX5^Rw{5EiuFr!8@VH`qphx zwF)$mi`Po-w)+M5LIUI?Mds6$`tm4P4KDE4nGb;~2(5`GU>rmMHN#stEav#(`zvU(J09#2#$jQJu7VOA*h=Z9XuWVnK&GRg@w5oDeI^% z?kKRJeyx=`3Aobk#vNHGsD@fb0-H0VV$q%II}HiQ!z&dK|ry)(A6wlqm}FZQ+1DT^gmDaQLP6QvLX)_3E_&gQ>p2Hge8U_goOJ660+Ksvbf@xxk%6l4&r+F z5_>5zHDX1F6KiVmwX$Bj%cL}j`c?mwk}OC_Kv@k^u!j5cp;T4#s6>&{2`_r<*>@%b z2=Z<+Q@Y=6d6y^-nNC2W!F0=favMi1!~ru4W)7zwSXh$a1Qt2~?R-h!P%ffPaS++M{*zg>PbCiBwR9t1U}C&7kk)J}AmGwxfG8HP!@5c#5)p#c- z^fj&$C2}0A(nggN4q3yaCx_g3`d!ohxsSnyAUne9)0{uQ4yN>q0E-1F4gdwja^vpF$6sMSH zChKowp^btNQM{IMKS7#0yzO4rc>e6XOho^F;7xVDUvg>yY#`%@Y2_ScAxcmQ-{q;= zj9ZN*K_L@Pua}=%#*7z~@2h{(M=J^QFBixyxY^_8gLmlPn@m%dBcm96bFu4>?$?a_ z%d=H#st~63qwW^3RBx`?z&xPmR--oeqg?2@(dAWLXi#RNm?^!K^I7-$31ED`Rt?h~ zqYY}9U(9#b?{+_$O1{OKcoa#2$jwEAX4A=VZ;4tawQ(g83l^y-5Z5(={6S1MMr|%P z>0e_ZMW7X4Mg%)i@Jk0~qhkNQkElWth@S!c)})mpL`lZ6TCD=6_oB3VVU7V)xs}F* zThdei@M${d04$kuU_&-&XD(CGe}n$Vp|`p|e1-qI^8)UFa%lX2;n4pR+V*b^n&;AW zb7cPK9R>Rwyf0Z0RK5ZCcVP|{6Y69$U9NcNIirt#>YidSED#NVr0GuC>^%C%-*j-_ zfn<1MN=fs9_>rM4H#0Z0*C5^cheI4k|I)Kv+}Owb`VO`Axq0RaCoE>ndNRl_k7*pk zo7eE>-XEpXN41W~Qvr8jGM4DmDWfP>vGkDsz_k}^)LKE)%^@9{AOrNz!y#L>#^F_Sbxo`t~H&|fWZQYVA;G{5h);CVIVH3 zY&c}t>lXsCOfB?c9a7YeoZuxUGh@oc5`# z$T(;)vas1hxPJTy$BXulk*{O(nrIt%E#%EkpaApx2c{dym0n0ah z?%y-^_KybkMtXYN(PNDDa35d3W-J)7OK6o&UvWXLFPlm5`&)`#k&F`c)oM}Dmw5u&70Tvx)!1Lr%(8iEn&akABUcs>Bd-0n1LR`^p(u; z))1j@)yv%CIBZYYLWOwce%gZCxOp2IPigk$bd}9dbnpT42}>Iv%Tq&WA<3&;$7_4P zC3W=@CMyv+J3HP@7>Ej~A6g&XZpY51{k3Ui_AREtzc1{e0n$)^$-q141FMv=_IU`j`$NZI?R zkESwW-tp<@!7#@}aAi*2o1-KkLO4>9fUEQF-3vBsudk2e3DAVfbHb_%e-h*fNwbZ8 zgiovDWq=aiBD4U?h7oi3PlP1{OKvfXK)Wu6G8~PxREtezJr}VN^oSsKn~ZL7!8F;D zF#usYbHVO&A}FF{f$t|=rl+g^5d09x;qyPhdgzBIEQH3p#f?*F*Gt>)3qh2QwSvG_ z5cE3&gFKe@8P(@~D_ipWD#<^fF(UDPL;gv7?}LenCx8ZzyU~HD+%UY78||qWZVJ-B zQ=bnRlHD(|Ql-3+n`@V^O{IA(w#tw~kd=F=?yombeq*`-bh@y5S_^D(g*2UYm&xWF zRK7t5N9cc49~9;BLAcgG)IL0ZZF8`c?|W? 
zWr!W!-wm1r=8YXiz*hTpc-As|lOO+(k+W!XZ-AOmgY!K*%UT*bBq&6NE3Y7$-AD)L z?`=-uOh9Q8yG$iIi+rBIOu?xU8ev;%h>hT9B1yTRhFZjF#E18RdAAnkhoJL#j8O$c)=N);wtcwUptT!0`U2O zCh%;4_>T^g0i2Eif|EfGAmG#>ClCk+6rru9HQG=}QHu_G-)z;OnM&qQ)z%S%muT!Q zo;ma=G?6L}N;{(J%>1$e>d9w7`XR1S?IX$hYGj$`_|0Sh>v1AcN8(?p^H>>5ZHzwM)rmgUq_^HEmb{LxDST>6CEHaoAkfQWPGpC%WMjE1v2uodJ40Vt|jE=G085;Ys6lyqlkb5V$7?s z^_Ov=@<&{m4$ipL3O%TrWVFW9#)a(CTVvA}=eiC*Qra2C$y$H$$#x%eW=4j~>xW&M4G8m~0t}Yaw4X~JL z=Ab~Z-6)yuL|!rxSl=G4yBM*-!tZLv=z#g_*~)d8*KiaTU~dF9kY)#tm(hClwgdd; ziG89*H_VQ(p#y$~g{aCn)~LFl0L97qzx3iT{Ht9{oLv5!6Re7>58y|ik9oZY3K(u# z#98t54;uiP`i$DFk`5X^M1#N}^$(TUk>4fr?I6PA`BVsrq)`1cB%`B_5`_rVe&De5 zZdy!M60e2RKwgV5^&p@sl$FnH(8!`Hz!*(DS$7r!{?@%_ElfYj)`uBl78 z0eZz)#&s^Ko~xoEJ{3C7p}t%S+ag=4+!j9*xdjp#Ik#3FdA@H(oB}UWqK`_u5{V>8Pu1RQ=76ekqagc($?9>Z0;y`Tx|Uw$jcK((QYU0 zEy--@Mqi54f0j<2`@QwO*kO5p_sf-+UH4~yGECL;OOZx zLHS@v_E%6|i0nKnKB?Q+fw->4TIr~3IVgQ`=kZSDVr(39(%Xmmp*{O$1vGdRfwrD} z?oI>f;~}57y88fPZqp7|D6%^qUI8_Ftb_FLcVH_2g+FmB(06Z`WgTO>xQx{7FxW|D z>h&aIRJoU|Bv^cElx*{3G31{T7Gzw1-}%%JQM>*^CQqBiQUYE^%pJ(b<|a(A&!|_+ zA@v)#Ew-gPucdig`9-_Bl&qLGDK)zKFX?0;6(?&%wExYK*sJkoOn01?iguoOxZOF0 zntW7zRmv6X4MDMhd^7%Aui9e=^!vJ9kIVW`TcV0a){s$yjM7pNPLl~<7mxe0pTk`R zFPxsX0wOj8=as^z%Z``TP$fIqCHvX*TNt+3IKS^S*C<#R>awy6-P~I;HvI;^|{7ehXD!4^}Lk z0F~hSqvwXFWFrPCI;4mwb4vd%zY^Ic{H=H8RWe<$yf(+=W(|qW-k+4_hf1vy_0qt7 z@X$AY-gHReNV)J!9aIKYrEq^9XkpJTbwq6}VyHkZs z1mnr5_DDT4SlQ~&;i}~Y!rgn$7vn@Qcy#er#S+fz^upy0E@Ox3ms16?Iv;ikOJ#HE_8`(r(IT&WaB+cb!VVllg$BvU)u5Gn<5h=3ES?j zF0`1lyUuZ_*lKeh3${tS%^H?^vHvh>VUpXnB~i+J&cSzo0mRubESq;D>-FA4W~X0| z`~8p{%3LgeG8L4Bpo#fx=YTg16xkW~0v;pL7*`C)QS=yAmS<5NR%>WODPb6$7CS$t%ahdz4lg z+bHD9X%}dM$&+A_qUrREaeBf5=O3$W^W^yYRQG4(V=V(7X%2(#`3%y?K7w~3It-Ty zNDK+Pq%c(xRkfKgur%X6sP7*P!H?@}q}PT(WAGl+licj!WKCDqyJU4dVUs;1$T_RL z!r`mexY^Y1swj;B3}m4Zd#+BS*L?;dJv{ha__lwz`O5nczCaf9F8U5svA)9|ZP=s0 z4Nsp>LC<}JjZ;*U%I~3=Ny4^hw73g?KWPqQbK{eGwRmvZ3X6iL>!fdZSm$L!f8Xb} z=J)-8Q)n8-`b8atbI7DGz6DmjY?-4pR^G(lc!r^+I9tx|KAdFE?@_5Y9!0QZD)2N4 zPShUY0G1;B40&x?s&c@>mxVnP!p>-2rO2yrZ!_?rly7^0h^8H4znvf;ARN7K8T?UcV|q#T(Q^1}oCm-FRgduK<^hK7L$*vHgeYN>#3 z9g_&GWT(Rzi#)og!8y?C2)D6gggZb!1bhSAmizA9(E}eXC3a0uXPvI7`boQA7nJ>f z2*W|O!NB%cs;)6~E!#`>@C>44Hm*O~P-tSChvPKcM~f|1AIX0#SV_dL~|G@&H`AaCeHdO$-hR3dm5S+5{s97KdrJpZ{1P$@-on3r}{7 zb!>m&KC;9{RhI8k+4PH>9ruNDn=K6*g6+ab+4C9i5P=cXv5fKGsa!c_@H*l9dNOdT zeXP?OYMMw3AC8Uh+WI1?7R>L4imr6-2YR;OxL9(hJ;j92-19XjOW`UT6GzxT41y7T zl@b`9?95e(t1s@zz~l|XWv^gU2lX3Ncf(#LENKsXlNqcou41V|?t^(v~UyVt#i4xVpE>usFb8CHY= z(uM&QVe8McW=&dbS^;tMOxh<9_8zGi3U~g14a{bMud9Z9`n=1ER+|-NE#T{$fl|GY zH=My=HnMaPm={TJ2Wzi7W`Gev-7j6lHh6z}Qkv2oIe_$feL+iC_YR+7yxYE=*)jsw zS>G-XE}*7n$oEbbpT~{4K+*Bt9oRBv%3h5pzDjfDWXY10@W#LootsoP&UlH8G6WP@n5F+%q^ zh~7ZFnq-&?gGx#+7BtR5LMbAq7K6vk^k5S@i&Dfaa&)}?;(#{jT?Is{l}y#TWRHdB zZcuz&u+E@(3Djf=OOsx6XD58;T4wXjI$4x2I7yu+Y!^0U$+2xkrOeK&F{h?7rpbAo z5g$~9@X-Q`9*t>a5mV?O)-;DQux0}x6z+>62qC~qm8UIi8|({~jEn+mdq!f_grv=h zNCwnVwFKp=PJz7Gpz!C73Ow^ce+p^8j zeOz&YF3li6zDES|AX(Gq%8EV+C53KE&vUG~iB>C>>El6YoaBEj;kEvdSy0EBy|@_a zon9|P-9#bX+!pWmP#pM{W}aBh)f!S{W|FOzRzC`6;4oJ^l?a*4l+9#2AUN(#t5Lx^~mdG_M{O3_-apD@!LtwWDJy85>QUJ9{Dh5;Or0j+ENNBvSfQ6|8Gt$xx-U z7HVkfzF1yo=T-Ra4K8DI*&vfc4k!MIf}S&}1hwxWNvQG^ouD8=Z^n(%U86B5$HK;6 zSJtR>gePmp=4gUswCq)RIjmY{Xj($g2fVvtDq5E~U!_km@@PErpKG?m~3D zS+KRVgd`U>T}4F3$|_pLhoy>`l!-O9R!3*1Xk>I0J)fs(hNPr4zq=0W1ND#7(*44R!k-LzuOxElY$@In(K^2hFV=NQdooZaW9`pb z4u+Uadz&z0v>y$0d$cDLcja-QcA#Q&^1fhLFC-&_Sk-IS%R{%57+T0xU!iPj1wO*6 zo^729yf_S?UA*v=;b^?+|G zXs$j1?FZ(4>~Z8}G>+r#MH1#X1Vlg!m^aAA05Bih1SBy{(41d06c1KTsq(TzTh(L* zNl554`W6=+Y}4uo0iFq!Pe=8f0g|m|y$jN}cBJ_CpM+C}Ya*Up*%H1oPb^E@XO 
zed_f6IKvJM9t_w@RldMH&^=bxB6tTrfe$N=<`S54ITf9pf+%D|QM6Ia3gF=~&K!Q;0Pf>+)W@-t z!Wj|OblLNj6eSgQ(!u@q%w~dDAiI4c_`@1TH^5a@pk|_`lIu}@=u5)q^9_)mbg%UN zG7Aqrk;Dqhq)j=}cf3|=${cGVlw;sI9|~P2j|+SUx!~T05%M5nM|-=#Bj}$di23t_M-7Am z`+)B^caMmHM*tlk-KoH1Bp)8uT!T6o0WNi{_v2@4G!Q2v@jMA7EbwgGvP>MVPAy0; z)7kPKC@7Hegfb&Am2AX$Vf9<{e zd5TiX-qK!E;f0M730RfYfrR90KpsoS$v0CWXpSgdfJ>h6lLh@)iO%a66J7>qiCao1 ztQ3#CkLPtM&P0+^5;=@?OOiAwO)4w-)$KI(u;cUO9>u=yIE>}KpXfJN&aX_2RCa3Fa6cN{3&MP!YO5^J$DEM|N^ZNjUSya#zhEazcshz- zp%^CO znUtdHxFpr7?>72!%ps5wph;Q zb5?;9e#rXuR$2eebepN6!8Jf;7I|rbp2J(D{?CsvbZWLaYKY^v`30eWp!m=5Mac2m1f@G2vwL z@BetK>BJqdq4>owO4SA%;hmXj5ujK4>$W*P*P zWQOs``gi^$n?|H2N!GVU#ojJBLLkdj4s&GD6)$B>#8%ce$Ghi5D`Fd;f+vfaaWpWL zkqEuSCA+Hf=40XTqV#*VQ7I|{**Ug3T0p0GDreW`i+0zcr6eck%_-$6)|$Pz{KSIz zIfAdT1UOM|ce}oGpI*q7FWS~C?;R>*=g5>YS8>!KwGGH794EB*kp#IZ+%jz9`4myJeM6B<`?S&I^)YKW#6@vB_9p=APw?;V3vBx?^|6fj~o+gz4`%r z(73obhMa-fMTlQAy*t5(!b)%FkH$drHW06RrG@rvL9@A$7AP}B4{&B7KUW4UP=gK$ z6hQ*btaPY4g_upxHz_l|^(U#!AtdoAOaomA+Ey4RqOr!3H)KRT+m`4)+}I8=UUwp4 z=6FnMh3X0S6$RpB&seAe4Zuioa_(dy)3j@s?lOA0NHX?ef0z8o{%Ba16JL+^&Vw^0 zg~~A}v$JU8#Dz!L?}j?>$*B+@4i2sDKbi!lH_bmn#1h4;0y-6Rzxl_}NR@)bkHtT9 z5sl(286oQrPU357798sXR(B^jx&f+G1+zAEf2(Y4%vha$UDHvmbnCtc-OB@LN@woprEEGjgZuMxUncj3+TFpO| z8WaGCzZXZ?r!c_(@%g{9&NeRA&KAz*jwS}i|EAQktY#y(DUR@cqgOA(x0b3H)v6@N zE7HNzCXa%Tx?a_+zB_`+pAIgn}agMv7qDc+e_VeX1jQpa~OMWy@)}l z_(hEcO5_u_SA7~<8#@m@{2C{=#Gga8nN4i@`FozcL8g+YAd4F(l%Xg{dFYFoim2SF znDYX$<)zPl8LVnXP5`)Ia3FMZ%_D8&gdlyv^|yx#{fvpBN^CINk6LZMoRw4`EU2QqPgn1__FV(Viwe zF7spEQ@+C=24ZpZxf#xmQXHu3);Xw!6ReA)h|W0ss%kG&Guu&Zwk6AP3yYI~_759N zH?SPHtEXSPV;hXT!H1meC;0M(4hp4d>7ahb(S>GM8fiQ7cz0su>6_uPf^pvSdN#0( z|DIjYuX@?^F5m8?zG;4gV=Bv~kH$KuurJn93X+U90{%Y1%No-@Y+v-B$2@hWq0~>nIu5xwQwj>Tmy|rs6HnNwwV_&!7sd$ZQgTtb($34ayN==#c)_#_M zoAeGj5>>lISHl#MNOHbMT9sH~1pN z!aeO?CxzFWhW}={k>|tIljSw)&~vv3@wA|$bvtLb98O3C79@q$0lRL}ex}8sIXgZS zg>%?>$=Wr<(FgDCx`$ltmsC@9qZXoRc1?=$lz0=O-&~3pZpjtRRJVXE_fAh36t59< zJMnO3(f6}?7^Wl2?j3pU{R~_5)zb*pcm{rsnoQ9*I;pZ&=)WjB6P$6CsVo6Z-WaOv zTwHombY*?;LO*5{^C$9Tu@ z>GF(rIbrog#fpv=b-U;LYY6FOV*YchK{T?Y5mB9tGN_9C;1xyKSn;kiOdojfR%i3L zWJvOJ$r}&i2$yNKL7t5R1J3=(wZ03oaLqsO_J>?1H4V$@odb2VA9 z@=4Xd+F&9%4-p;r-#r$|-aug!D zR-MSm!Z}WL#Yt2nh=wahGD*Iid2?xHPvM8UZEtX2{9`s|LQP15QW2`ccY{2`9O8cHOH>UXtt0X`#9wRv@MnUSB=G% zQ?bQ!^8Ev{QtyS&3rdw$bW~pLW&G4>V<(fktUFMjVQ>5{0-v0C{wcECf3SDN`Dym; zNT0ZX@sN%hrMc5rJa2_5Q7%Wq4Wm=cA_#Ncb{$pba0^1Ar?9*m3T;O9(K4BrvmwiD zxtoAR&LSDGk?ud*i0iA3F1iK;diwe9;aQ0($I)`qZRGaE6ss}|%!!UC9m-wG<`Zzh z^i0gS;butz;5PigfkE$sAE09e&S1Ne3I-U^K7ILU&1i3(Y_&mvUY8jbKH7`jI{xW$ z1n6>7l=9chA*$W)N#C*?arp@sDqx2k1{jPJLh}Dga`yAVJAP5@(&lhnS~e;M+FA+% z2IGcs%7KS^`w#61FSjj88wK7dnID3>4-CL;qqgTNfYrCWU7dq~stNoTB6w{tUI_8= zelQWd-7@X+fz{Vg<70r@+b$nub+GSAdwT&Js9ZDJ>!yE`zG}CB3~~?zAw{WRAdnyQ z2(YO3FoYKhf_8(U6MeL)-~kOmNOAqb zc2gl^owT9g&K4A!8Mzw@85yAs1!sFuPcV&vK=>6wFo*$Yy Date: Tue, 29 Jul 2014 20:58:05 -0700 Subject: [PATCH 6/6] [SPARK-2054][SQL] Code Generation for Expression Evaluation Adds a new method for evaluating expressions using code that is generated though Scala reflection. This functionality is configured by the SQLConf option `spark.sql.codegen` and is currently turned off by default. Evaluation can be done in several specialized ways: - *Projection* - Given an input row, produce a new row from a set of expressions that define each column in terms of the input row. This can either produce a new Row object or perform the projection in-place on an existing Row (MutableProjection). 
Evaluation can be done in several specialized ways:

- *Projection* - Given an input row, produce a new row from a set of expressions that define each column in terms of the input row. This can either produce a new Row object or perform the projection in-place on an existing Row (MutableProjection).
- *Ordering* - Compares two rows based on a list of `SortOrder` expressions.
- *Condition* - Returns `true` or `false` given an input row.

For each of the above operations there is both a Generated and an Interpreted version. When generation for a given expression type is undefined, the code generator falls back on calling the `eval` function of the expression class. Even without custom code, there is still a potential speed up, as loops are unrolled and code can still be inlined by the JIT.

This PR also contains a new type of aggregation operator, `GeneratedAggregate`, that performs aggregation by using generated `Projection` code. Currently the required expression rewriting only works for simple aggregations like `SUM` and `COUNT`. This functionality will be extended in a future PR.

This PR also performs several clean ups that simplified the implementation:

- The notion of `Binding` all expressions in a tree automatically before query execution has been removed. Instead, it is the responsibility of an operator to provide the input schema when creating one of the specialized evaluators defined above (a sketch of the new pattern follows this list). In cases when the standard eval method is going to be called, binding can still be done manually using `BindReferences`. There are a few reasons for this change. First, there were many operators where it just didn't work before: for example, operators with more than one child, and operators like aggregation that do significant rewriting of the expression. Second, the semantics of equality with `BoundReferences` are broken. Specifically, we have had a few bugs where partitioning breaks because of the binding.
- A copy of the current `SQLContext` is automatically propagated to all `SparkPlan` nodes by the query planner. Before, this was done ad hoc for the nodes that needed it. However, that required a lot of boilerplate, as one had to always remember to make the field `transient` and also had to modify `otherCopyArgs`.
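As a rough sketch of the new usage pattern (the attribute names, schema, and values here are illustrative, not taken from the patch itself), an operator binds expressions against its input schema and then builds one of the evaluators above:

    import org.apache.spark.sql.catalyst.expressions._
    import org.apache.spark.sql.catalyst.types._

    // Hypothetical two-column input schema.
    val a = AttributeReference("a", IntegerType, nullable = false)()
    val b = AttributeReference("b", IntegerType, nullable = false)()

    // Binding rewrites the AttributeReferences inside a + b into
    // BoundReferences that read the input row by ordinal; operators
    // are now responsible for doing this explicitly.
    val boundSum = BindReferences.bindReference(Add(a, b), Seq(a, b))

    // The interpreted fallback simply calls eval on each expression.
    val project = new InterpretedProjection(Seq(boundSum))
    project(new GenericRow(Array[Any](1, 2)))  // => Row containing 3

    // Code generation itself is toggled per context (assuming an existing
    // SQLContext named sqlContext):
    // sqlContext.setConf("spark.sql.codegen", "true")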
Author: Michael Armbrust

Closes #993 from marmbrus/newCodeGen and squashes the following commits:

96ef82c [Michael Armbrust] Merge remote-tracking branch 'apache/master' into newCodeGen
f34122d [Michael Armbrust] Merge remote-tracking branch 'apache/master' into newCodeGen
67b1c48 [Michael Armbrust] Use conf variable in SQLConf object
4bdc42c [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
41a40c9 [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
de22aac [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
fed3634 [Michael Armbrust] Inspectors are not serializable.
ef8d42b [Michael Armbrust] comments
533fdfd [Michael Armbrust] More logging of expression rewriting for GeneratedAggregate.
3cd773e [Michael Armbrust] Allow codegen for Generate.
64b2ee1 [Michael Armbrust] Implement copy
3587460 [Michael Armbrust] Drop unused string builder function.
9cce346 [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
1a61293 [Michael Armbrust] Address review comments.
0672e8a [Michael Armbrust] Address comments.
1ec2d6e [Michael Armbrust] Address comments
033abc6 [Michael Armbrust] off by default
4771fab [Michael Armbrust] Docs, more test coverage.
d30fee2 [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
d2ad5c5 [Michael Armbrust] Refactor putting SQLContext into SparkPlan. Fix ordering, other test cases.
be2cd6b [Michael Armbrust] WIP: Remove old method for reference binding, more work on configuration.
bc88ecd [Michael Armbrust] Style
6cc97ca [Michael Armbrust] Merge remote-tracking branch 'origin/master' into newCodeGen
4220f1e [Michael Armbrust] Better config, docs, etc.
ca6cc6b [Michael Armbrust] WIP
9d67d85 [Michael Armbrust] Fix hive planner
fc522d5 [Michael Armbrust] Hook generated aggregation in to the planner.
e742640 [Michael Armbrust] Remove unneeded changes and code.
675e679 [Michael Armbrust] Upgrade paradise.
0093376 [Michael Armbrust] Comment / indenting cleanup.
d81f998 [Michael Armbrust] include schema for binding.
0e889e8 [Michael Armbrust] Use typeOf instead tq
f623ffd [Michael Armbrust] Quiet logging from test suite.
efad14f [Michael Armbrust] Remove some half finished functions.
92e74a4 [Michael Armbrust] add overrides
a2b5408 [Michael Armbrust] WIP: Code generation with scala reflection.
---
 pom.xml                                                    |  10 +
 project/SparkBuild.scala                                   |  11 +-
 sql/catalyst/pom.xml                                       |   9 +
 .../spark/sql/catalyst/dsl/package.scala                   |   2 +-
 .../catalyst/expressions/BoundAttribute.scala              |  50 +-
 .../sql/catalyst/expressions/Projection.scala              |  39 +-
 .../spark/sql/catalyst/expressions/Row.scala               |  40 +-
 .../sql/catalyst/expressions/ScalaUdf.scala                |   1 +
 .../expressions/codegen/CodeGenerator.scala                | 468 ++++++++++++++++++
 .../codegen/GenerateMutableProjection.scala                |  76 +++
 .../codegen/GenerateOrdering.scala                         |  98 ++++
 .../codegen/GeneratePredicate.scala                        |  48 ++
 .../codegen/GenerateProjection.scala                       | 219 ++++++++
 .../expressions/codegen/package.scala                      |  80 +++
 .../sql/catalyst/expressions/package.scala                 |  28 +-
 .../sql/catalyst/expressions/predicates.scala              |   3 +
 .../apache/spark/sql/catalyst/package.scala                |  27 +
 .../sql/catalyst/planning/patterns.scala                   |  71 +++
 .../catalyst/plans/logical/LogicalPlan.scala               |   2 +-
 .../sql/catalyst/plans/logical/commands.scala              |  12 +-
 .../sql/catalyst/rules/RuleExecutor.scala                  |   5 +-
 .../spark/sql/catalyst/types/dataTypes.scala               |  18 +-
 .../ExpressionEvaluationSuite.scala                        |  55 +-
 .../GeneratedEvaluationSuite.scala                         |  69 +++
 .../GeneratedMutableEvaluationSuite.scala                  |  61 +++
 .../optimizer/CombiningLimitsSuite.scala                   |   4 +-
 .../scala/org/apache/spark/sql/SQLConf.scala               |  19 +-
 .../org/apache/spark/sql/SQLContext.scala                  |  25 +-
 .../spark/sql/api/java/JavaSQLContext.scala                |   4 +-
 .../spark/sql/execution/Aggregate.scala                    |  13 +-
 .../apache/spark/sql/execution/Exchange.scala              |   8 +-
 .../apache/spark/sql/execution/Generate.scala              |  13 +-
 .../sql/execution/GeneratedAggregate.scala                 | 200 ++++++++
 .../spark/sql/execution/SparkPlan.scala                    |  81 ++-
 .../spark/sql/execution/SparkStrategies.scala              | 138 +++---
 .../spark/sql/execution/basicOperators.scala               |  44 +-
 .../spark/sql/execution/debug/package.scala                |   8 +-
 .../apache/spark/sql/execution/joins.scala                 |  44 +-
 .../spark/sql/parquet/ParquetRelation.scala                |  18 +-
 .../sql/parquet/ParquetTableOperations.scala               |  14 +-
 .../spark/sql/parquet/ParquetTestData.scala                |   9 +-
 .../org/apache/spark/sql/QueryTest.scala                   |   1 +
 .../spark/sql/execution/PlannerSuite.scala                 |   8 +-
 .../apache/spark/sql/execution/TgfSuite.scala              |   2 +-
 .../spark/sql/parquet/ParquetQuerySuite.scala              |   5 +-
 .../apache/spark/sql/hive/HiveContext.scala                |   2 +-
 .../hive/execution/InsertIntoHiveTable.scala               |   2 +-
 .../hive/execution/ScriptTransformation.scala              |   2 +-
 .../org/apache/spark/sql/hive/hiveUdfs.scala               |   6 +-
 ...se null-0-8ef2f741400830ef889a9dd0c817fe3d              |   1 +
 ...le case-0-f513687d17dcb18546fefa75000a52f2              |   1 +
 ...le case-0-c264e319c52f1840a32959d552b99e73              |   1 +
 .../sql/hive/execution/HiveQuerySuite.scala                |  11 +
 53 files changed, 1889 insertions(+), 297 deletions(-)
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateMutableProjection.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateProjection.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
 create mode 100644 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala
 create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedEvaluationSuite.scala
 create mode 100644 sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedMutableEvaluationSuite.scala
 create mode 100644 sql/core/src/main/scala/org/apache/spark/sql/execution/GeneratedAggregate.scala
 create mode 100644 sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d
 create mode 100644 sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2
 create mode 100644 sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73

diff --git a/pom.xml b/pom.xml
index 39538f9660623..ae97bf03c53a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -114,6 +114,7 @@
     <sbt.project.name>spark</sbt.project.name>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
+    <scala.macros.version>2.0.1</scala.macros.version>
     <mesos.version>0.18.1</mesos.version>
     <mesos.classifier>shaded-protobuf</mesos.classifier>
     <akka.group>org.spark-project.akka</akka.group>
@@ -825,6 +826,15 @@
               <javacArg>-target</javacArg>
               <javacArg>${java.version}</javacArg>
             </javacArgs>
+            <!-- The following plugin is required to use quasiquotes in Scala 2.10 and is used
+                 by Spark SQL for code generation. -->
+            <compilerPlugins>
+              <compilerPlugin>
+                <groupId>org.scalamacros</groupId>
+                <artifactId>paradise_${scala.version}</artifactId>
+                <version>${scala.macros.version}</version>
+              </compilerPlugin>
+            </compilerPlugins>
           </configuration>
         </plugin>
         <plugin>
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 0a6326e72297a..490fac3cc3646 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -167,6 +167,9 @@ object SparkBuild extends PomBuild {
   /* Enable unidoc only for the root spark project */
   enable(Unidoc.settings)(spark)
 
+  /* Catalyst macro settings */
+  enable(Catalyst.settings)(catalyst)
+
   /* Spark SQL Core console settings */
   enable(SQL.settings)(sql)
 
@@ -189,10 +192,13 @@ object Flume {
   lazy val settings = sbtavro.SbtAvro.avroSettings
 }
 
-object SQL {
-
+object Catalyst {
   lazy val settings = Seq(
+    addCompilerPlugin("org.scalamacros" % "paradise" % "2.0.1" cross CrossVersion.full))
+}
 
+object SQL {
+  lazy val settings = Seq(
     initialCommands in console :=
       """
         |import org.apache.spark.sql.catalyst.analysis._
@@ -207,7 +213,6 @@ object SQL {
         |import org.apache.spark.sql.test.TestSQLContext._
         |import org.apache.spark.sql.parquet.ParquetTestData""".stripMargin
   )
-
 }
 
 object Hive {
diff --git a/sql/catalyst/pom.xml b/sql/catalyst/pom.xml
index 531bfddbf237b..54fa96baa1e18 100644
--- a/sql/catalyst/pom.xml
+++ b/sql/catalyst/pom.xml
@@ -36,10 +36,19 @@
     </dependency>
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-compiler</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-reflect</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.scalamacros</groupId>
+      <artifactId>quasiquotes_${scala.binary.version}</artifactId>
+      <version>${scala.macros.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-core_${scala.binary.version}</artifactId>
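The scala-compiler and quasiquotes dependencies above are what allow the new `CodeGenerator` to assemble and compile expression code at runtime through a reflection ToolBox. A minimal sketch of that mechanism (a hand-written illustration, not code from this patch):

    import scala.reflect.runtime.currentMirror
    import scala.tools.reflect.ToolBox

    // mkToolBox requires scala-compiler on the classpath, hence the new deps.
    val toolBox = currentMirror.mkToolBox()
    // Parse and compile a tiny function at runtime, then invoke it.
    val addOne = toolBox.eval(toolBox.parse("(x: Int) => x + 1")).asInstanceOf[Int => Int]
    addOne(41)  // => 42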
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
index 5c8c810d9135a..f44521d6381c9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
@@ -202,7 +202,7 @@ package object dsl {
     // Protobuf terminology
     def required = a.withNullability(false)
 
-    def at(ordinal: Int) = BoundReference(ordinal, a)
+    def at(ordinal: Int) = BoundReference(ordinal, a.dataType, a.nullable)
   }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
index 9ce1f01056462..a3ebec8082cbd 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
@@ -17,10 +17,12 @@
 
 package org.apache.spark.sql.catalyst.expressions
 
-import org.apache.spark.sql.catalyst.trees
 import org.apache.spark.sql.catalyst.errors.attachTree
 import org.apache.spark.sql.catalyst.plans.QueryPlan
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.types._
+import org.apache.spark.sql.catalyst.trees
+
 import org.apache.spark.sql.Logging
 
 /**
@@ -28,61 +30,27 @@ import org.apache.spark.sql.Logging
  * to be retrieved more efficiently. However, since operations like column pruning can change
  * the layout of intermediate tuples, BindReferences should be run after all such transformations.
  */
-case class BoundReference(ordinal: Int, baseReference: Attribute)
-  extends Attribute with trees.LeafNode[Expression] {
+case class BoundReference(ordinal: Int, dataType: DataType, nullable: Boolean)
+  extends Expression with trees.LeafNode[Expression] {
 
   type EvaluatedType = Any
 
-  override def nullable = baseReference.nullable
-  override def dataType = baseReference.dataType
-  override def exprId = baseReference.exprId
-  override def qualifiers = baseReference.qualifiers
-  override def name = baseReference.name
+  override def references = Set.empty
 
-  override def newInstance = BoundReference(ordinal, baseReference.newInstance)
-  override def withNullability(newNullability: Boolean) =
-    BoundReference(ordinal, baseReference.withNullability(newNullability))
-  override def withQualifiers(newQualifiers: Seq[String]) =
-    BoundReference(ordinal, baseReference.withQualifiers(newQualifiers))
-
-  override def toString = s"$baseReference:$ordinal"
+  override def toString = s"input[$ordinal]"
 
   override def eval(input: Row): Any = input(ordinal)
 }
 
-/**
- * Used to denote operators that do their own binding of attributes internally.
- */
-trait NoBind { self: trees.TreeNode[_] => }
-
-class BindReferences[TreeNode <: QueryPlan[TreeNode]] extends Rule[TreeNode] {
-  import BindReferences._
-
-  def apply(plan: TreeNode): TreeNode = {
-    plan.transform {
-      case n: NoBind => n.asInstanceOf[TreeNode]
-      case leafNode if leafNode.children.isEmpty => leafNode
-      case unaryNode if unaryNode.children.size == 1 => unaryNode.transformExpressions { case e =>
-        bindReference(e, unaryNode.children.head.output)
-      }
-    }
-  }
-}
-
 object BindReferences extends Logging {
   def bindReference[A <: Expression](expression: A, input: Seq[Attribute]): A = {
     expression.transform { case a: AttributeReference =>
       attachTree(a, "Binding attribute") {
         val ordinal = input.indexWhere(_.exprId == a.exprId)
         if (ordinal == -1) {
-          // TODO: This fallback is required because some operators (such as ScriptTransform)
-          // produce new attributes that can't be bound. Likely the right thing to do is remove
-          // this rule and require all operators to explicitly bind to the input schema that
-          // they specify.
-          logger.debug(s"Couldn't find $a in ${input.mkString("[", ",", "]")}")
-          a
+          sys.error(s"Couldn't find $a in ${input.mkString("[", ",", "]")}")
         } else {
-          BoundReference(ordinal, a)
+          BoundReference(ordinal, a.dataType, a.nullable)
         }
       }
     }.asInstanceOf[A] // Kind of a hack, but safe.  TODO: Tighten return type when possible.
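With the change above, a `BoundReference` no longer wraps an `Attribute`; it is a plain positional accessor carrying only an ordinal, a data type, and nullability. For example (illustrative values, not from the patch):

    import org.apache.spark.sql.catalyst.expressions._
    import org.apache.spark.sql.catalyst.types._

    // eval is just an ordinal lookup into the input row.
    val ref = BoundReference(0, IntegerType, nullable = true)
    ref.eval(new GenericRow(Array[Any](42)))  // => 42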
- logger.debug(s"Couldn't find $a in ${input.mkString("[", ",", "]")}") - a + sys.error(s"Couldn't find $a in ${input.mkString("[", ",", "]")}") } else { - BoundReference(ordinal, a) + BoundReference(ordinal, a.dataType, a.nullable) } } }.asInstanceOf[A] // Kind of a hack, but safe. TODO: Tighten return type when possible. diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala index 2c71d2c7b3563..8fc5896974438 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala @@ -17,12 +17,13 @@ package org.apache.spark.sql.catalyst.expressions + /** - * Converts a [[Row]] to another Row given a sequence of expression that define each column of the - * new row. If the schema of the input row is specified, then the given expression will be bound to - * that schema. + * A [[Projection]] that is calculated by calling the `eval` of each of the specified expressions. + * @param expressions a sequence of expressions that determine the value of each column of the + * output row. */ -class Projection(expressions: Seq[Expression]) extends (Row => Row) { +class InterpretedProjection(expressions: Seq[Expression]) extends Projection { def this(expressions: Seq[Expression], inputSchema: Seq[Attribute]) = this(expressions.map(BindReferences.bindReference(_, inputSchema))) @@ -40,25 +41,25 @@ class Projection(expressions: Seq[Expression]) extends (Row => Row) { } /** - * Converts a [[Row]] to another Row given a sequence of expression that define each column of th - * new row. If the schema of the input row is specified, then the given expression will be bound to - * that schema. - * - * In contrast to a normal projection, a MutableProjection reuses the same underlying row object - * each time an input row is added. This significantly reduces the cost of calculating the - * projection, but means that it is not safe to hold on to a reference to a [[Row]] after `next()` - * has been called on the [[Iterator]] that produced it. Instead, the user must call `Row.copy()` - * and hold on to the returned [[Row]] before calling `next()`. + * A [[MutableProjection]] that is calculated by calling `eval` on each of the specified + * expressions. + * @param expressions a sequence of expressions that determine the value of each column of the + * output row. 
*/ -case class MutableProjection(expressions: Seq[Expression]) extends (Row => Row) { +case class InterpretedMutableProjection(expressions: Seq[Expression]) extends MutableProjection { def this(expressions: Seq[Expression], inputSchema: Seq[Attribute]) = this(expressions.map(BindReferences.bindReference(_, inputSchema))) private[this] val exprArray = expressions.toArray - private[this] val mutableRow = new GenericMutableRow(exprArray.size) + private[this] var mutableRow: MutableRow = new GenericMutableRow(exprArray.size) def currentValue: Row = mutableRow - def apply(input: Row): Row = { + override def target(row: MutableRow): MutableProjection = { + mutableRow = row + this + } + + override def apply(input: Row): Row = { var i = 0 while (i < exprArray.length) { mutableRow(i) = exprArray(i).eval(input) @@ -76,6 +77,12 @@ class JoinedRow extends Row { private[this] var row1: Row = _ private[this] var row2: Row = _ + def this(left: Row, right: Row) = { + this() + row1 = left + row2 = right + } + /** Updates this JoinedRow to used point at two new base rows. Returns itself. */ def apply(r1: Row, r2: Row): Row = { row1 = r1 diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala index 74ae723686cfe..7470cb861b83b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Row.scala @@ -88,15 +88,6 @@ trait MutableRow extends Row { def setByte(ordinal: Int, value: Byte) def setFloat(ordinal: Int, value: Float) def setString(ordinal: Int, value: String) - - /** - * Experimental - * - * Returns a mutable string builder for the specified column. A given row should return the - * result of any mutations made to the returned buffer next time getString is called for the same - * column. - */ - def getStringBuilder(ordinal: Int): StringBuilder } /** @@ -180,6 +171,35 @@ class GenericRow(protected[catalyst] val values: Array[Any]) extends Row { values(i).asInstanceOf[String] } + // Custom hashCode function that matches the efficient code generated version. + override def hashCode(): Int = { + var result: Int = 37 + + var i = 0 + while (i < values.length) { + val update: Int = + if (isNullAt(i)) { + 0 + } else { + apply(i) match { + case b: Boolean => if (b) 0 else 1 + case b: Byte => b.toInt + case s: Short => s.toInt + case i: Int => i + case l: Long => (l ^ (l >>> 32)).toInt + case f: Float => java.lang.Float.floatToIntBits(f) + case d: Double => + val b = java.lang.Double.doubleToLongBits(d) + (b ^ (b >>> 32)).toInt + case other => other.hashCode() + } + } + result = 37 * result + update + i += 1 + } + result + } + def copy() = this } @@ -187,8 +207,6 @@ class GenericMutableRow(size: Int) extends GenericRow(size) with MutableRow { /** No-arg constructor for serialization. */ def this() = this(0) - def getStringBuilder(ordinal: Int): StringBuilder = ??? 
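A quick illustrative check of the hash scheme added above (a sketch that assumes only the `GenericRow` shown in this patch): value-equal rows must hash identically under the 37-based scheme, since interpreted `GenericRow`s and code-generated rows can meet in the same hash table during joins and aggregation.

    // Sketch: rows with equal values, including nulls, hash identically.
    val r1 = new GenericRow(Array[Any](1, "a", null, 2L))
    val r2 = new GenericRow(Array[Any](1, "a", null, 2L))
    assert(r1.hashCode == r2.hashCode)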
- override def setBoolean(ordinal: Int,value: Boolean): Unit = { values(ordinal) = value } override def setByte(ordinal: Int,value: Byte): Unit = { values(ordinal) = value } override def setDouble(ordinal: Int,value: Double): Unit = { values(ordinal) = value } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUdf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUdf.scala index 5e089f7618e0a..acddf5e9c7004 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUdf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ScalaUdf.scala @@ -29,6 +29,7 @@ case class ScalaUdf(function: AnyRef, dataType: DataType, children: Seq[Expressi override def eval(input: Row): Any = { children.size match { + case 0 => function.asInstanceOf[() => Any]() case 1 => function.asInstanceOf[(Any) => Any](children(0).eval(input)) case 2 => function.asInstanceOf[(Any, Any) => Any]( diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala new file mode 100644 index 0000000000000..5b398695bf560 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala @@ -0,0 +1,468 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.expressions.codegen + +import com.google.common.cache.{CacheLoader, CacheBuilder} + +import scala.language.existentials + +import org.apache.spark.Logging +import org.apache.spark.sql.catalyst.expressions +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.types._ + +/** + * A base class for generators of byte code to perform expression evaluation. Includes a set of + * helpers for referring to Catalyst types and building trees that perform evaluation of individual + * expressions. 
+ */ +abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Logging { + import scala.reflect.runtime.{universe => ru} + import scala.reflect.runtime.universe._ + + import scala.tools.reflect.ToolBox + + protected val toolBox = runtimeMirror(getClass.getClassLoader).mkToolBox() + + protected val rowType = typeOf[Row] + protected val mutableRowType = typeOf[MutableRow] + protected val genericRowType = typeOf[GenericRow] + protected val genericMutableRowType = typeOf[GenericMutableRow] + + protected val projectionType = typeOf[Projection] + protected val mutableProjectionType = typeOf[MutableProjection] + + private val curId = new java.util.concurrent.atomic.AtomicInteger() + private val javaSeparator = "$" + + /** + * Generates a class for a given input expression. Called when there is not cached code + * already available. + */ + protected def create(in: InType): OutType + + /** + * Canonicalizes an input expression. Used to avoid double caching expressions that differ only + * cosmetically. + */ + protected def canonicalize(in: InType): InType + + /** Binds an input expression to a given input schema */ + protected def bind(in: InType, inputSchema: Seq[Attribute]): InType + + /** + * A cache of generated classes. + * + * From the Guava Docs: A Cache is similar to ConcurrentMap, but not quite the same. The most + * fundamental difference is that a ConcurrentMap persists all elements that are added to it until + * they are explicitly removed. A Cache on the other hand is generally configured to evict entries + * automatically, in order to constrain its memory footprint + */ + protected val cache = CacheBuilder.newBuilder() + .maximumSize(1000) + .build( + new CacheLoader[InType, OutType]() { + override def load(in: InType): OutType = globalLock.synchronized { + create(in) + } + }) + + /** Generates the requested evaluator binding the given expression(s) to the inputSchema. */ + def apply(expressions: InType, inputSchema: Seq[Attribute]): OutType = + apply(bind(expressions, inputSchema)) + + /** Generates the requested evaluator given already bound expression(s). */ + def apply(expressions: InType): OutType = cache.get(canonicalize(expressions)) + + /** + * Returns a term name that is unique within this instance of a `CodeGenerator`. + * + * (Since we aren't in a macro context we do not seem to have access to the built in `freshName` + * function.) + */ + protected def freshName(prefix: String): TermName = { + newTermName(s"$prefix$javaSeparator${curId.getAndIncrement}") + } + + /** + * Scala ASTs for evaluating an [[Expression]] given a [[Row]] of input. + * + * @param code The sequence of statements required to evaluate the expression. + * @param nullTerm A term that holds a boolean value representing whether the expression evaluated + * to null. + * @param primitiveTerm A term for a possible primitive value of the result of the evaluation. Not + * valid if `nullTerm` is set to `false`. + * @param objectTerm A possibly boxed version of the result of evaluating this expression. + */ + protected case class EvaluatedExpression( + code: Seq[Tree], + nullTerm: TermName, + primitiveTerm: TermName, + objectTerm: TermName) + + /** + * Given an expression tree returns an [[EvaluatedExpression]], which contains Scala trees that + * can be used to determine the result of evaluating the expression on an input row. 
+ */ + def expressionEvaluator(e: Expression): EvaluatedExpression = { + val primitiveTerm = freshName("primitiveTerm") + val nullTerm = freshName("nullTerm") + val objectTerm = freshName("objectTerm") + + implicit class Evaluate1(e: Expression) { + def castOrNull(f: TermName => Tree, dataType: DataType): Seq[Tree] = { + val eval = expressionEvaluator(e) + eval.code ++ + q""" + val $nullTerm = ${eval.nullTerm} + val $primitiveTerm = + if($nullTerm) + ${defaultPrimitive(dataType)} + else + ${f(eval.primitiveTerm)} + """.children + } + } + + implicit class Evaluate2(expressions: (Expression, Expression)) { + + /** + * Short hand for generating binary evaluation code, which depends on two sub-evaluations of + * the same type. If either of the sub-expressions is null, the result of this computation + * is assumed to be null. + * + * @param f a function from two primitive term names to a tree that evaluates them. + */ + def evaluate(f: (TermName, TermName) => Tree): Seq[Tree] = + evaluateAs(expressions._1.dataType)(f) + + def evaluateAs(resultType: DataType)(f: (TermName, TermName) => Tree): Seq[Tree] = { + // TODO: Right now some timestamp tests fail if we enforce this... + if (expressions._1.dataType != expressions._2.dataType) { + log.warn(s"${expressions._1.dataType} != ${expressions._2.dataType}") + } + + val eval1 = expressionEvaluator(expressions._1) + val eval2 = expressionEvaluator(expressions._2) + val resultCode = f(eval1.primitiveTerm, eval2.primitiveTerm) + + eval1.code ++ eval2.code ++ + q""" + val $nullTerm = ${eval1.nullTerm} || ${eval2.nullTerm} + val $primitiveTerm: ${termForType(resultType)} = + if($nullTerm) { + ${defaultPrimitive(resultType)} + } else { + $resultCode.asInstanceOf[${termForType(resultType)}] + } + """.children : Seq[Tree] + } + } + + val inputTuple = newTermName(s"i") + + // TODO: Skip generation of null handling code when expression are not nullable. 
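The quasiquotes used throughout `expressionEvaluator` are ordinary runtime ASTs. For readers unfamiliar with the machinery, a self-contained sketch of the compile-at-runtime pattern this class is built on (assuming Scala 2.10 plus the quasiquote dependencies this patch adds, or Scala 2.11+ where quasiquotes are built in):

    import scala.reflect.runtime.currentMirror
    import scala.reflect.runtime.universe._
    import scala.tools.reflect.ToolBox

    object ToolBoxSketch extends App {
      // Build an AST with a quasiquote, then compile and run it at runtime.
      val toolBox = currentMirror.mkToolBox()
      val tree: Tree = q"(x: Int) => x + 1"
      val inc = toolBox.eval(tree).asInstanceOf[Int => Int]
      println(inc(41)) // prints 42
    }

Because the toolbox drives the Scala compiler, which is not thread safe on 2.10, the real generators funnel every compilation through the `globalLock` seen in the cache loader above.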
+ val primitiveEvaluation: PartialFunction[Expression, Seq[Tree]] = { + case b @ BoundReference(ordinal, dataType, nullable) => + val nullValue = q"$inputTuple.isNullAt($ordinal)" + q""" + val $nullTerm: Boolean = $nullValue + val $primitiveTerm: ${termForType(dataType)} = + if($nullTerm) + ${defaultPrimitive(dataType)} + else + ${getColumn(inputTuple, dataType, ordinal)} + """.children + + case expressions.Literal(null, dataType) => + q""" + val $nullTerm = true + val $primitiveTerm: ${termForType(dataType)} = null.asInstanceOf[${termForType(dataType)}] + """.children + + case expressions.Literal(value: Boolean, dataType) => + q""" + val $nullTerm = ${value == null} + val $primitiveTerm: ${termForType(dataType)} = $value + """.children + + case expressions.Literal(value: String, dataType) => + q""" + val $nullTerm = ${value == null} + val $primitiveTerm: ${termForType(dataType)} = $value + """.children + + case expressions.Literal(value: Int, dataType) => + q""" + val $nullTerm = ${value == null} + val $primitiveTerm: ${termForType(dataType)} = $value + """.children + + case expressions.Literal(value: Long, dataType) => + q""" + val $nullTerm = ${value == null} + val $primitiveTerm: ${termForType(dataType)} = $value + """.children + + case Cast(e @ BinaryType(), StringType) => + val eval = expressionEvaluator(e) + eval.code ++ + q""" + val $nullTerm = ${eval.nullTerm} + val $primitiveTerm = + if($nullTerm) + ${defaultPrimitive(StringType)} + else + new String(${eval.primitiveTerm}.asInstanceOf[Array[Byte]]) + """.children + + case Cast(child @ NumericType(), IntegerType) => + child.castOrNull(c => q"$c.toInt", IntegerType) + + case Cast(child @ NumericType(), LongType) => + child.castOrNull(c => q"$c.toLong", LongType) + + case Cast(child @ NumericType(), DoubleType) => + child.castOrNull(c => q"$c.toDouble", DoubleType) + + case Cast(child @ NumericType(), FloatType) => + child.castOrNull(c => q"$c.toFloat", IntegerType) + + // Special handling required for timestamps in hive test cases since the toString function + // does not match the expected output. + case Cast(e, StringType) if e.dataType != TimestampType => + val eval = expressionEvaluator(e) + eval.code ++ + q""" + val $nullTerm = ${eval.nullTerm} + val $primitiveTerm = + if($nullTerm) + ${defaultPrimitive(StringType)} + else + ${eval.primitiveTerm}.toString + """.children + + case EqualTo(e1, e2) => + (e1, e2).evaluateAs (BooleanType) { case (eval1, eval2) => q"$eval1 == $eval2" } + + /* TODO: Fix null semantics. 
+ case In(e1, list) if !list.exists(!_.isInstanceOf[expressions.Literal]) => + val eval = expressionEvaluator(e1) + + val checks = list.map { + case expressions.Literal(v: String, dataType) => + q"if(${eval.primitiveTerm} == $v) return true" + case expressions.Literal(v: Int, dataType) => + q"if(${eval.primitiveTerm} == $v) return true" + } + + val funcName = newTermName(s"isIn${curId.getAndIncrement()}") + + q""" + def $funcName: Boolean = { + ..${eval.code} + if(${eval.nullTerm}) return false + ..$checks + return false + } + val $nullTerm = false + val $primitiveTerm = $funcName + """.children + */ + + case GreaterThan(e1 @ NumericType(), e2 @ NumericType()) => + (e1, e2).evaluateAs (BooleanType) { case (eval1, eval2) => q"$eval1 > $eval2" } + case GreaterThanOrEqual(e1 @ NumericType(), e2 @ NumericType()) => + (e1, e2).evaluateAs (BooleanType) { case (eval1, eval2) => q"$eval1 >= $eval2" } + case LessThan(e1 @ NumericType(), e2 @ NumericType()) => + (e1, e2).evaluateAs (BooleanType) { case (eval1, eval2) => q"$eval1 < $eval2" } + case LessThanOrEqual(e1 @ NumericType(), e2 @ NumericType()) => + (e1, e2).evaluateAs (BooleanType) { case (eval1, eval2) => q"$eval1 <= $eval2" } + + case And(e1, e2) => + val eval1 = expressionEvaluator(e1) + val eval2 = expressionEvaluator(e2) + + eval1.code ++ eval2.code ++ + q""" + var $nullTerm = false + var $primitiveTerm: ${termForType(BooleanType)} = false + + if ((!${eval1.nullTerm} && !${eval1.primitiveTerm}) || + (!${eval2.nullTerm} && !${eval2.primitiveTerm})) { + $nullTerm = false + $primitiveTerm = false + } else if (${eval1.nullTerm} || ${eval2.nullTerm} ) { + $nullTerm = true + } else { + $nullTerm = false + $primitiveTerm = true + } + """.children + + case Or(e1, e2) => + val eval1 = expressionEvaluator(e1) + val eval2 = expressionEvaluator(e2) + + eval1.code ++ eval2.code ++ + q""" + var $nullTerm = false + var $primitiveTerm: ${termForType(BooleanType)} = false + + if ((!${eval1.nullTerm} && ${eval1.primitiveTerm}) || + (!${eval2.nullTerm} && ${eval2.primitiveTerm})) { + $nullTerm = false + $primitiveTerm = true + } else if (${eval1.nullTerm} || ${eval2.nullTerm} ) { + $nullTerm = true + } else { + $nullTerm = false + $primitiveTerm = false + } + """.children + + case Not(child) => + // Uh, bad function name... 
+ child.castOrNull(c => q"!$c", BooleanType) + + case Add(e1, e2) => (e1, e2) evaluate { case (eval1, eval2) => q"$eval1 + $eval2" } + case Subtract(e1, e2) => (e1, e2) evaluate { case (eval1, eval2) => q"$eval1 - $eval2" } + case Multiply(e1, e2) => (e1, e2) evaluate { case (eval1, eval2) => q"$eval1 * $eval2" } + case Divide(e1, e2) => (e1, e2) evaluate { case (eval1, eval2) => q"$eval1 / $eval2" } + + case IsNotNull(e) => + val eval = expressionEvaluator(e) + q""" + ..${eval.code} + var $nullTerm = false + var $primitiveTerm: ${termForType(BooleanType)} = !${eval.nullTerm} + """.children + + case IsNull(e) => + val eval = expressionEvaluator(e) + q""" + ..${eval.code} + var $nullTerm = false + var $primitiveTerm: ${termForType(BooleanType)} = ${eval.nullTerm} + """.children + + case c @ Coalesce(children) => + q""" + var $nullTerm = true + var $primitiveTerm: ${termForType(c.dataType)} = ${defaultPrimitive(c.dataType)} + """.children ++ + children.map { c => + val eval = expressionEvaluator(c) + q""" + if($nullTerm) { + ..${eval.code} + if(!${eval.nullTerm}) { + $nullTerm = false + $primitiveTerm = ${eval.primitiveTerm} + } + } + """ + } + + case i @ expressions.If(condition, trueValue, falseValue) => + val condEval = expressionEvaluator(condition) + val trueEval = expressionEvaluator(trueValue) + val falseEval = expressionEvaluator(falseValue) + + q""" + var $nullTerm = false + var $primitiveTerm: ${termForType(i.dataType)} = ${defaultPrimitive(i.dataType)} + ..${condEval.code} + if(!${condEval.nullTerm} && ${condEval.primitiveTerm}) { + ..${trueEval.code} + $nullTerm = ${trueEval.nullTerm} + $primitiveTerm = ${trueEval.primitiveTerm} + } else { + ..${falseEval.code} + $nullTerm = ${falseEval.nullTerm} + $primitiveTerm = ${falseEval.primitiveTerm} + } + """.children + } + + // If there was no match in the partial function above, we fall back on calling the interpreted + // expression evaluator. 
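The `And` and `Or` cases above hand-roll SQL's three-valued logic in generated code. The same truth tables in standalone, interpreted form (a sketch using `Option[Boolean]`, with `None` standing in for NULL):

    // Kleene three-valued logic: a definite false (for AND) or definite true
    // (for OR) short-circuits even when the other side is NULL.
    def sqlAnd(a: Option[Boolean], b: Option[Boolean]): Option[Boolean] = (a, b) match {
      case (Some(false), _) | (_, Some(false)) => Some(false)
      case (None, _) | (_, None)               => None
      case _                                   => Some(true)
    }

    def sqlOr(a: Option[Boolean], b: Option[Boolean]): Option[Boolean] = (a, b) match {
      case (Some(true), _) | (_, Some(true)) => Some(true)
      case (None, _) | (_, None)             => None
      case _                                 => Some(false)
    }

Any expression the partial function above does not match falls through to the interpreted `eval` call shown next.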
+ val code: Seq[Tree] = + primitiveEvaluation.lift.apply(e).getOrElse { + log.debug(s"No rules to generate $e") + val tree = reify { e } + q""" + val $objectTerm = $tree.eval(i) + val $nullTerm = $objectTerm == null + val $primitiveTerm = $objectTerm.asInstanceOf[${termForType(e.dataType)}] + """.children + } + + EvaluatedExpression(code, nullTerm, primitiveTerm, objectTerm) + } + + protected def getColumn(inputRow: TermName, dataType: DataType, ordinal: Int) = { + dataType match { + case dt @ NativeType() => q"$inputRow.${accessorForType(dt)}($ordinal)" + case _ => q"$inputRow.apply($ordinal).asInstanceOf[${termForType(dataType)}]" + } + } + + protected def setColumn( + destinationRow: TermName, + dataType: DataType, + ordinal: Int, + value: TermName) = { + dataType match { + case dt @ NativeType() => q"$destinationRow.${mutatorForType(dt)}($ordinal, $value)" + case _ => q"$destinationRow.update($ordinal, $value)" + } + } + + protected def accessorForType(dt: DataType) = newTermName(s"get${primitiveForType(dt)}") + protected def mutatorForType(dt: DataType) = newTermName(s"set${primitiveForType(dt)}") + + protected def primitiveForType(dt: DataType) = dt match { + case IntegerType => "Int" + case LongType => "Long" + case ShortType => "Short" + case ByteType => "Byte" + case DoubleType => "Double" + case FloatType => "Float" + case BooleanType => "Boolean" + case StringType => "String" + } + + protected def defaultPrimitive(dt: DataType) = dt match { + case BooleanType => ru.Literal(Constant(false)) + case FloatType => ru.Literal(Constant(-1.0.toFloat)) + case StringType => ru.Literal(Constant("")) + case ShortType => ru.Literal(Constant(-1.toShort)) + case LongType => ru.Literal(Constant(1L)) + case ByteType => ru.Literal(Constant(-1.toByte)) + case DoubleType => ru.Literal(Constant(-1.toDouble)) + case DecimalType => ru.Literal(Constant(-1)) // Will get implicity converted as needed. + case IntegerType => ru.Literal(Constant(-1)) + case _ => ru.Literal(Constant(null)) + } + + protected def termForType(dt: DataType) = dt match { + case n: NativeType => n.tag + case _ => typeTag[Any] + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateMutableProjection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateMutableProjection.scala new file mode 100644 index 0000000000000..a419fd7ecb39b --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateMutableProjection.scala @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.catalyst.expressions.codegen + +import org.apache.spark.sql.catalyst.expressions._ + +/** + * Generates byte code that produces a [[MutableRow]] object that can update itself based on a new + * input [[Row]] for a fixed set of [[Expression Expressions]]. + */ +object GenerateMutableProjection extends CodeGenerator[Seq[Expression], () => MutableProjection] { + import scala.reflect.runtime.{universe => ru} + import scala.reflect.runtime.universe._ + + val mutableRowName = newTermName("mutableRow") + + protected def canonicalize(in: Seq[Expression]): Seq[Expression] = + in.map(ExpressionCanonicalizer(_)) + + protected def bind(in: Seq[Expression], inputSchema: Seq[Attribute]): Seq[Expression] = + in.map(BindReferences.bindReference(_, inputSchema)) + + protected def create(expressions: Seq[Expression]): (() => MutableProjection) = { + val projectionCode = expressions.zipWithIndex.flatMap { case (e, i) => + val evaluationCode = expressionEvaluator(e) + + evaluationCode.code :+ + q""" + if(${evaluationCode.nullTerm}) + mutableRow.setNullAt($i) + else + ${setColumn(mutableRowName, e.dataType, i, evaluationCode.primitiveTerm)} + """ + } + + val code = + q""" + () => { new $mutableProjectionType { + + private[this] var $mutableRowName: $mutableRowType = + new $genericMutableRowType(${expressions.size}) + + def target(row: $mutableRowType): $mutableProjectionType = { + $mutableRowName = row + this + } + + /* Provide immutable access to the last projected row. */ + def currentValue: $rowType = mutableRow + + def apply(i: $rowType): $rowType = { + ..$projectionCode + mutableRow + } + } } + """ + + log.debug(s"code for ${expressions.mkString(",")}:\n$code") + toolBox.eval(code).asInstanceOf[() => MutableProjection] + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala new file mode 100644 index 0000000000000..4211998f7511a --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.expressions.codegen + +import com.typesafe.scalalogging.slf4j.Logging +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.types.{StringType, NumericType} + +/** + * Generates bytecode for an [[Ordering]] of [[Row Rows]] for a given set of + * [[Expression Expressions]]. 
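A usage sketch for the mutable-projection generator above (hedged; the expression is arbitrary): note that `create` yields a `() => MutableProjection` factory rather than a single shared instance, so each task can build a projection with its own backing row and never contend over mutable state.

    // Sketch: every factory call produces an independent MutableProjection.
    val makeProjection = GenerateMutableProjection(
      Alias(Add(Literal(1), Literal(2)), "c")() :: Nil)
    val projection = makeProjection()   // fresh GenericMutableRow inside
    val row = projection(EmptyRow)      // same object as projection.currentValue
    assert(row.getInt(0) == 3)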
+ */ +object GenerateOrdering extends CodeGenerator[Seq[SortOrder], Ordering[Row]] with Logging { + import scala.reflect.runtime.{universe => ru} + import scala.reflect.runtime.universe._ + + protected def canonicalize(in: Seq[SortOrder]): Seq[SortOrder] = + in.map(ExpressionCanonicalizer(_).asInstanceOf[SortOrder]) + + protected def bind(in: Seq[SortOrder], inputSchema: Seq[Attribute]): Seq[SortOrder] = + in.map(BindReferences.bindReference(_, inputSchema)) + + protected def create(ordering: Seq[SortOrder]): Ordering[Row] = { + val a = newTermName("a") + val b = newTermName("b") + val comparisons = ordering.zipWithIndex.map { case (order, i) => + val evalA = expressionEvaluator(order.child) + val evalB = expressionEvaluator(order.child) + + val compare = order.child.dataType match { + case _: NumericType => + q""" + val comp = ${evalA.primitiveTerm} - ${evalB.primitiveTerm} + if(comp != 0) { + return ${if (order.direction == Ascending) q"comp.toInt" else q"-comp.toInt"} + } + """ + case StringType => + if (order.direction == Ascending) { + q"""return ${evalA.primitiveTerm}.compare(${evalB.primitiveTerm})""" + } else { + q"""return ${evalB.primitiveTerm}.compare(${evalA.primitiveTerm})""" + } + } + + q""" + i = $a + ..${evalA.code} + i = $b + ..${evalB.code} + if (${evalA.nullTerm} && ${evalB.nullTerm}) { + // Nothing + } else if (${evalA.nullTerm}) { + return ${if (order.direction == Ascending) q"-1" else q"1"} + } else if (${evalB.nullTerm}) { + return ${if (order.direction == Ascending) q"1" else q"-1"} + } else { + $compare + } + """ + } + + val q"class $orderingName extends $orderingType { ..$body }" = reify { + class SpecificOrdering extends Ordering[Row] { + val o = ordering + } + }.tree.children.head + + val code = q""" + class $orderingName extends $orderingType { + ..$body + def compare(a: $rowType, b: $rowType): Int = { + var i: $rowType = null // Holds current row being evaluated. + ..$comparisons + return 0 + } + } + new $orderingName() + """ + logger.debug(s"Generated Ordering: $code") + toolBox.eval(code).asInstanceOf[Ordering[Row]] + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala new file mode 100644 index 0000000000000..2a0935c790cf3 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.expressions.codegen + +import org.apache.spark.sql.catalyst.expressions._ + +/** + * Generates bytecode that evaluates a boolean [[Expression]] on a given input [[Row]]. 
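A hedged usage sketch for `GenerateOrdering` (the one-column schema and rows below are illustrative): the result is a plain `Ordering[Row]`, so it plugs directly into standard sorts.

    // Sketch: generate an Ordering[Row] for one ascending Int column and sort.
    val col = AttributeReference("a", IntegerType)()
    val ordering = GenerateOrdering(SortOrder(col, Ascending) :: Nil, col :: Nil)
    val rows = Seq(new GenericRow(Array[Any](2)), new GenericRow(Array[Any](1)))
    val sorted = rows.sorted(ordering)  // rows now ordered 1, 2 on column "a"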
+ */ +object GeneratePredicate extends CodeGenerator[Expression, (Row) => Boolean] { + import scala.reflect.runtime.{universe => ru} + import scala.reflect.runtime.universe._ + + protected def canonicalize(in: Expression): Expression = ExpressionCanonicalizer(in) + + protected def bind(in: Expression, inputSchema: Seq[Attribute]): Expression = + BindReferences.bindReference(in, inputSchema) + + protected def create(predicate: Expression): ((Row) => Boolean) = { + val cEval = expressionEvaluator(predicate) + + val code = + q""" + (i: $rowType) => { + ..${cEval.code} + if (${cEval.nullTerm}) false else ${cEval.primitiveTerm} + } + """ + + log.debug(s"Generated predicate '$predicate':\n$code") + toolBox.eval(code).asInstanceOf[Row => Boolean] + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateProjection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateProjection.scala new file mode 100644 index 0000000000000..77fa02c13de30 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateProjection.scala @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.expressions.codegen + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.types._ + + +/** + * Generates bytecode that produces a new [[Row]] object based on a fixed set of input + * [[Expression Expressions]] and a given input [[Row]]. The returned [[Row]] object is custom + * generated based on the output types of the [[Expression]] to avoid boxing of primitive values. + */ +object GenerateProjection extends CodeGenerator[Seq[Expression], Projection] { + import scala.reflect.runtime.{universe => ru} + import scala.reflect.runtime.universe._ + + protected def canonicalize(in: Seq[Expression]): Seq[Expression] = + in.map(ExpressionCanonicalizer(_)) + + protected def bind(in: Seq[Expression], inputSchema: Seq[Attribute]): Seq[Expression] = + in.map(BindReferences.bindReference(_, inputSchema)) + + // Make Mutablility optional... + protected def create(expressions: Seq[Expression]): Projection = { + val tupleLength = ru.Literal(Constant(expressions.length)) + val lengthDef = q"final val length = $tupleLength" + + /* TODO: Configurable... 
+ val nullFunctions = + q""" + private final val nullSet = new org.apache.spark.util.collection.BitSet(length) + final def setNullAt(i: Int) = nullSet.set(i) + final def isNullAt(i: Int) = nullSet.get(i) + """ + */ + + val nullFunctions = + q""" + private[this] var nullBits = new Array[Boolean](${expressions.size}) + final def setNullAt(i: Int) = { nullBits(i) = true } + final def isNullAt(i: Int) = nullBits(i) + """.children + + val tupleElements = expressions.zipWithIndex.flatMap { + case (e, i) => + val elementName = newTermName(s"c$i") + val evaluatedExpression = expressionEvaluator(e) + val iLit = ru.Literal(Constant(i)) + + q""" + var ${newTermName(s"c$i")}: ${termForType(e.dataType)} = _ + { + ..${evaluatedExpression.code} + if(${evaluatedExpression.nullTerm}) + setNullAt($iLit) + else + $elementName = ${evaluatedExpression.primitiveTerm} + } + """.children : Seq[Tree] + } + + val iteratorFunction = { + val allColumns = (0 until expressions.size).map { i => + val iLit = ru.Literal(Constant(i)) + q"if(isNullAt($iLit)) { null } else { ${newTermName(s"c$i")} }" + } + q"final def iterator = Iterator[Any](..$allColumns)" + } + + val accessorFailure = q"""scala.sys.error("Invalid ordinal:" + i)""" + val applyFunction = { + val cases = (0 until expressions.size).map { i => + val ordinal = ru.Literal(Constant(i)) + val elementName = newTermName(s"c$i") + val iLit = ru.Literal(Constant(i)) + + q"if(i == $ordinal) { if(isNullAt($i)) return null else return $elementName }" + } + q"final def apply(i: Int): Any = { ..$cases; $accessorFailure }" + } + + val updateFunction = { + val cases = expressions.zipWithIndex.map {case (e, i) => + val ordinal = ru.Literal(Constant(i)) + val elementName = newTermName(s"c$i") + val iLit = ru.Literal(Constant(i)) + + q""" + if(i == $ordinal) { + if(value == null) { + setNullAt(i) + } else { + $elementName = value.asInstanceOf[${termForType(e.dataType)}] + return + } + }""" + } + q"final def update(i: Int, value: Any): Unit = { ..$cases; $accessorFailure }" + } + + val specificAccessorFunctions = NativeType.all.map { dataType => + val ifStatements = expressions.zipWithIndex.flatMap { + case (e, i) if e.dataType == dataType => + val elementName = newTermName(s"c$i") + // TODO: The string of ifs gets pretty inefficient as the row grows in size. + // TODO: Optional null checks? + q"if(i == $i) return $elementName" :: Nil + case _ => Nil + } + + q""" + final def ${accessorForType(dataType)}(i: Int):${termForType(dataType)} = { + ..$ifStatements; + $accessorFailure + }""" + } + + val specificMutatorFunctions = NativeType.all.map { dataType => + val ifStatements = expressions.zipWithIndex.flatMap { + case (e, i) if e.dataType == dataType => + val elementName = newTermName(s"c$i") + // TODO: The string of ifs gets pretty inefficient as the row grows in size. + // TODO: Optional null checks? 
+ q"if(i == $i) { $elementName = value; return }" :: Nil + case _ => Nil + } + + q""" + final def ${mutatorForType(dataType)}(i: Int, value: ${termForType(dataType)}): Unit = { + ..$ifStatements; + $accessorFailure + }""" + } + + val hashValues = expressions.zipWithIndex.map { case (e,i) => + val elementName = newTermName(s"c$i") + val nonNull = e.dataType match { + case BooleanType => q"if ($elementName) 0 else 1" + case ByteType | ShortType | IntegerType => q"$elementName.toInt" + case LongType => q"($elementName ^ ($elementName >>> 32)).toInt" + case FloatType => q"java.lang.Float.floatToIntBits($elementName)" + case DoubleType => + q"{ val b = java.lang.Double.doubleToLongBits($elementName); (b ^ (b >>>32)).toInt }" + case _ => q"$elementName.hashCode" + } + q"if (isNullAt($i)) 0 else $nonNull" + } + + val hashUpdates: Seq[Tree] = hashValues.map(v => q"""result = 37 * result + $v""": Tree) + + val hashCodeFunction = + q""" + override def hashCode(): Int = { + var result: Int = 37 + ..$hashUpdates + result + } + """ + + val columnChecks = (0 until expressions.size).map { i => + val elementName = newTermName(s"c$i") + q"if (this.$elementName != specificType.$elementName) return false" + } + + val equalsFunction = + q""" + override def equals(other: Any): Boolean = other match { + case specificType: SpecificRow => + ..$columnChecks + return true + case other => super.equals(other) + } + """ + + val copyFunction = + q""" + final def copy() = new $genericRowType(this.toArray) + """ + + val classBody = + nullFunctions ++ ( + lengthDef +: + iteratorFunction +: + applyFunction +: + updateFunction +: + equalsFunction +: + hashCodeFunction +: + copyFunction +: + (tupleElements ++ specificAccessorFunctions ++ specificMutatorFunctions)) + + val code = q""" + final class SpecificRow(i: $rowType) extends $mutableRowType { + ..$classBody + } + + new $projectionType { def apply(r: $rowType) = new SpecificRow(r) } + """ + + log.debug( + s"MutableRow, initExprs: ${expressions.mkString(",")} code:\n${toolBox.typeCheck(code)}") + toolBox.eval(code).asInstanceOf[Projection] + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala new file mode 100644 index 0000000000000..80c7dfd376c96 --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.catalyst.expressions + +import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.sql.catalyst.rules +import org.apache.spark.sql.catalyst.util + +/** + * A collection of generators that build custom bytecode at runtime for performing the evaluation + * of catalyst expression. + */ +package object codegen { + + /** + * A lock to protect invoking the scala compiler at runtime, since it is not thread safe in Scala + * 2.10. + */ + protected[codegen] val globalLock = org.apache.spark.sql.catalyst.ScalaReflectionLock + + /** Canonicalizes an expression so those that differ only by names can reuse the same code. */ + object ExpressionCanonicalizer extends rules.RuleExecutor[Expression] { + val batches = + Batch("CleanExpressions", FixedPoint(20), CleanExpressions) :: Nil + + object CleanExpressions extends rules.Rule[Expression] { + def apply(e: Expression): Expression = e transform { + case Alias(c, _) => c + } + } + } + + /** + * :: DeveloperApi :: + * Dumps the bytecode from a class to the screen using javap. + */ + @DeveloperApi + object DumpByteCode { + import scala.sys.process._ + val dumpDirectory = util.getTempFilePath("sparkSqlByteCode") + dumpDirectory.mkdir() + + def apply(obj: Any): Unit = { + val generatedClass = obj.getClass + val classLoader = + generatedClass + .getClassLoader + .asInstanceOf[scala.tools.nsc.interpreter.AbstractFileClassLoader] + val generatedBytes = classLoader.classBytes(generatedClass.getName) + + val packageDir = new java.io.File(dumpDirectory, generatedClass.getPackage.getName) + if (!packageDir.exists()) { packageDir.mkdir() } + + val classFile = + new java.io.File(packageDir, generatedClass.getName.split("\\.").last + ".class") + + val outfile = new java.io.FileOutputStream(classFile) + outfile.write(generatedBytes) + outfile.close() + + println( + s"javap -p -v -classpath ${dumpDirectory.getCanonicalPath} ${generatedClass.getName}".!!) + } + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala index b6f2451b52e1f..55d95991c5f11 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala @@ -47,4 +47,30 @@ package org.apache.spark.sql.catalyst * ==Evaluation== * The result of expressions can be evaluated using the `Expression.apply(Row)` method. */ -package object expressions +package object expressions { + + /** + * Converts a [[Row]] to another Row given a sequence of expression that define each column of the + * new row. If the schema of the input row is specified, then the given expression will be bound + * to that schema. + */ + abstract class Projection extends (Row => Row) + + /** + * Converts a [[Row]] to another Row given a sequence of expression that define each column of the + * new row. If the schema of the input row is specified, then the given expression will be bound + * to that schema. + * + * In contrast to a normal projection, a MutableProjection reuses the same underlying row object + * each time an input row is added. This significantly reduces the cost of calculating the + * projection, but means that it is not safe to hold on to a reference to a [[Row]] after `next()` + * has been called on the [[Iterator]] that produced it. 
Instead, the user must call `Row.copy()` + * and hold on to the returned [[Row]] before calling `next()`. + */ + abstract class MutableProjection extends Projection { + def currentValue: Row + + /** Uses the given row to store the output of the projection. */ + def target(row: MutableRow): MutableProjection + } +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala index 06b94a98d3cd0..5976b0ddf3e03 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala @@ -23,6 +23,9 @@ import org.apache.spark.sql.catalyst.types.BooleanType object InterpretedPredicate { + def apply(expression: Expression, inputSchema: Seq[Attribute]): (Row => Boolean) = + apply(BindReferences.bindReference(expression, inputSchema)) + def apply(expression: Expression): (Row => Boolean) = { (r: Row) => expression.eval(r).asInstanceOf[Boolean] } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala new file mode 100644 index 0000000000000..3b3e206055cfc --- /dev/null +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +package object catalyst { + /** + * A JVM-global lock that should be used to prevent thread safety issues when using things in + * scala.reflect.*. Note that Scala Reflection API is made thread-safe in 2.11, but not yet for + * 2.10.* builds. See SI-6240 for more details. + */ + protected[catalyst] object ScalaReflectionLock +} diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala index 026692abe067d..418f8686bfe5c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala @@ -104,6 +104,77 @@ object PhysicalOperation extends PredicateHelper { } } +/** + * Matches a logical aggregation that can be performed on distributed data in two steps. The first + * operates on the data in each partition performing partial aggregation for each group. The second + * occurs after the shuffle and completes the aggregation. + * + * This pattern will only match if all aggregate expressions can be computed partially and will + * return the rewritten aggregation expressions for both phases. 
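As a concrete illustration of the two-phase split described above (a standalone sketch, not the planner's code), consider AVG: the partial phase computes a (sum, count) pair per partition before the shuffle, and the final phase combines those pairs afterwards. The list that follows enumerates exactly what the pattern returns to the planner.

    // Sketch: AVG rewritten as the partially computable SUM and COUNT.
    val partitions = Seq(Seq(1.0, 2.0), Seq(3.0))        // data on two nodes
    val partial = partitions.map(p => (p.sum, p.size))   // phase 1, pre-shuffle
    val (sum, count) = partial.reduce((x, y) => (x._1 + y._1, x._2 + y._2))
    val avg = sum / count                                // phase 2, post-shuffle
    assert(avg == 2.0)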
+ * + * The returned values for this match are as follows: + * - Grouping attributes for the final aggregation. + * - Aggregates for the final aggregation. + * - Grouping expressions for the partial aggregation. + * - Partial aggregate expressions. + * - Input to the aggregation. + */ +object PartialAggregation { + type ReturnType = + (Seq[Attribute], Seq[NamedExpression], Seq[Expression], Seq[NamedExpression], LogicalPlan) + + def unapply(plan: LogicalPlan): Option[ReturnType] = plan match { + case logical.Aggregate(groupingExpressions, aggregateExpressions, child) => + // Collect all aggregate expressions. + val allAggregates = + aggregateExpressions.flatMap(_ collect { case a: AggregateExpression => a}) + // Collect all aggregate expressions that can be computed partially. + val partialAggregates = + aggregateExpressions.flatMap(_ collect { case p: PartialAggregate => p}) + + // Only do partial aggregation if supported by all aggregate expressions. + if (allAggregates.size == partialAggregates.size) { + // Create a map of expressions to their partial evaluations for all aggregate expressions. + val partialEvaluations: Map[Long, SplitEvaluation] = + partialAggregates.map(a => (a.id, a.asPartial)).toMap + + // We need to pass all grouping expressions though so the grouping can happen a second + // time. However some of them might be unnamed so we alias them allowing them to be + // referenced in the second aggregation. + val namedGroupingExpressions: Map[Expression, NamedExpression] = groupingExpressions.map { + case n: NamedExpression => (n, n) + case other => (other, Alias(other, "PartialGroup")()) + }.toMap + + // Replace aggregations with a new expression that computes the result from the already + // computed partial evaluations and grouping values. + val rewrittenAggregateExpressions = aggregateExpressions.map(_.transformUp { + case e: Expression if partialEvaluations.contains(e.id) => + partialEvaluations(e.id).finalEvaluation + case e: Expression if namedGroupingExpressions.contains(e) => + namedGroupingExpressions(e).toAttribute + }).asInstanceOf[Seq[NamedExpression]] + + val partialComputation = + (namedGroupingExpressions.values ++ + partialEvaluations.values.flatMap(_.partialEvaluations)).toSeq + + val namedGroupingAttributes = namedGroupingExpressions.values.map(_.toAttribute).toSeq + + Some( + (namedGroupingAttributes, + rewrittenAggregateExpressions, + groupingExpressions, + partialComputation, + child)) + } else { + None + } + case _ => None + } +} + + /** * A pattern that finds joins with equality conditions that can be evaluated using equi-join. */ diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala index ac85f95b52a2f..888cb08e95f06 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala @@ -112,7 +112,7 @@ abstract class LeafNode extends LogicalPlan with trees.LeafNode[LogicalPlan] { self: Product => override lazy val statistics: Statistics = - throw new UnsupportedOperationException("default leaf nodes don't have meaningful Statistics") + throw new UnsupportedOperationException(s"LeafNode $nodeName must implement statistics.") // Leaf nodes by definition cannot reference any input attributes. 
override def references = Set.empty diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/commands.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/commands.scala index a357c6ffb8977..481a5a4f212b2 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/commands.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/commands.scala @@ -35,7 +35,7 @@ abstract class Command extends LeafNode { */ case class NativeCommand(cmd: String) extends Command { override def output = - Seq(BoundReference(0, AttributeReference("result", StringType, nullable = false)())) + Seq(AttributeReference("result", StringType, nullable = false)()) } /** @@ -43,7 +43,7 @@ case class NativeCommand(cmd: String) extends Command { */ case class SetCommand(key: Option[String], value: Option[String]) extends Command { override def output = Seq( - BoundReference(1, AttributeReference("", StringType, nullable = false)())) + AttributeReference("", StringType, nullable = false)()) } /** @@ -52,7 +52,7 @@ case class SetCommand(key: Option[String], value: Option[String]) extends Comman */ case class ExplainCommand(plan: LogicalPlan) extends Command { override def output = - Seq(BoundReference(0, AttributeReference("plan", StringType, nullable = false)())) + Seq(AttributeReference("plan", StringType, nullable = false)()) } /** @@ -71,7 +71,7 @@ case class DescribeCommand( isExtended: Boolean) extends Command { override def output = Seq( // Column names are based on Hive. - BoundReference(0, AttributeReference("col_name", StringType, nullable = false)()), - BoundReference(1, AttributeReference("data_type", StringType, nullable = false)()), - BoundReference(2, AttributeReference("comment", StringType, nullable = false)())) + AttributeReference("col_name", StringType, nullable = false)(), + AttributeReference("data_type", StringType, nullable = false)(), + AttributeReference("comment", StringType, nullable = false)()) } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala index e32adb76fe146..e300bdbececbd 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala @@ -72,7 +72,10 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging { } iteration += 1 if (iteration > batch.strategy.maxIterations) { - logger.info(s"Max iterations ($iteration) reached for batch ${batch.name}") + // Only log if this is a rule that is supposed to run more than once. 
+ if (iteration != 2) { + logger.info(s"Max iterations (${iteration - 1}) reached for batch ${batch.name}") + } continue = false } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/types/dataTypes.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/types/dataTypes.scala index cd4b5e9c1b529..71808f76d632b 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/types/dataTypes.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/types/dataTypes.scala @@ -23,16 +23,13 @@ import scala.reflect.ClassTag import scala.reflect.runtime.universe.{typeTag, TypeTag, runtimeMirror} import scala.util.parsing.combinator.RegexParsers +import org.apache.spark.sql.catalyst.ScalaReflectionLock import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression} import org.apache.spark.util.Utils /** - * A JVM-global lock that should be used to prevent thread safety issues when using things in - * scala.reflect.*. Note that Scala Reflection API is made thread-safe in 2.11, but not yet for - * 2.10.* builds. See SI-6240 for more details. + * Utility functions for working with DataTypes. */ -protected[catalyst] object ScalaReflectionLock - object DataType extends RegexParsers { protected lazy val primitiveType: Parser[DataType] = "StringType" ^^^ StringType | @@ -99,6 +96,13 @@ abstract class DataType { case object NullType extends DataType +object NativeType { + def all = Seq( + IntegerType, BooleanType, LongType, DoubleType, FloatType, ShortType, ByteType, StringType) + + def unapply(dt: DataType): Boolean = all.contains(dt) +} + trait PrimitiveType extends DataType { override def isPrimitive = true } @@ -149,6 +153,10 @@ abstract class NumericType extends NativeType with PrimitiveType { val numeric: Numeric[JvmType] } +object NumericType { + def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[NumericType] +} + /** Matcher for any expressions that evaluate to [[IntegralType]]s */ object IntegralType { def unapply(a: Expression): Boolean = a match { diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvaluationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvaluationSuite.scala index 58f8c341e6676..999c9fff38d60 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvaluationSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvaluationSuite.scala @@ -29,7 +29,11 @@ import org.apache.spark.sql.catalyst.dsl.expressions._ class ExpressionEvaluationSuite extends FunSuite { test("literals") { - assert((Literal(1) + Literal(1)).eval(null) === 2) + checkEvaluation(Literal(1), 1) + checkEvaluation(Literal(true), true) + checkEvaluation(Literal(0L), 0L) + checkEvaluation(Literal("test"), "test") + checkEvaluation(Literal(1) + Literal(1), 2) } /** @@ -61,10 +65,8 @@ class ExpressionEvaluationSuite extends FunSuite { test("3VL Not") { notTrueTable.foreach { case (v, answer) => - val expr = ! 
Literal(v, BooleanType) - val result = expr.eval(null) - if (result != answer) - fail(s"$expr should not evaluate to $result, expected: $answer") } + checkEvaluation(!Literal(v, BooleanType), answer) + } } booleanLogicTest("AND", _ && _, @@ -127,6 +129,13 @@ class ExpressionEvaluationSuite extends FunSuite { } } + test("IN") { + checkEvaluation(In(Literal(1), Seq(Literal(1), Literal(2))), true) + checkEvaluation(In(Literal(2), Seq(Literal(1), Literal(2))), true) + checkEvaluation(In(Literal(3), Seq(Literal(1), Literal(2))), false) + checkEvaluation(In(Literal(1), Seq(Literal(1), Literal(2))) && In(Literal(2), Seq(Literal(1), Literal(2))), true) + } + test("LIKE literal Regular Expression") { checkEvaluation(Literal(null, StringType).like("a"), null) checkEvaluation(Literal("a", StringType).like(Literal(null, StringType)), null) @@ -232,21 +241,21 @@ class ExpressionEvaluationSuite extends FunSuite { checkEvaluation(Literal(false) cast IntegerType, 0) checkEvaluation(Cast(Literal(1) cast BooleanType, IntegerType), 1) checkEvaluation(Cast(Literal(0) cast BooleanType, IntegerType), 0) - checkEvaluation("23" cast DoubleType, 23) + checkEvaluation("23" cast DoubleType, 23d) checkEvaluation("23" cast IntegerType, 23) - checkEvaluation("23" cast FloatType, 23) - checkEvaluation("23" cast DecimalType, 23) - checkEvaluation("23" cast ByteType, 23) - checkEvaluation("23" cast ShortType, 23) + checkEvaluation("23" cast FloatType, 23f) + checkEvaluation("23" cast DecimalType, 23: BigDecimal) + checkEvaluation("23" cast ByteType, 23.toByte) + checkEvaluation("23" cast ShortType, 23.toShort) checkEvaluation("2012-12-11" cast DoubleType, null) checkEvaluation(Literal(123) cast IntegerType, 123) - checkEvaluation(Literal(23d) + Cast(true, DoubleType), 24) + checkEvaluation(Literal(23d) + Cast(true, DoubleType), 24d) checkEvaluation(Literal(23) + Cast(true, IntegerType), 24) - checkEvaluation(Literal(23f) + Cast(true, FloatType), 24) - checkEvaluation(Literal(BigDecimal(23)) + Cast(true, DecimalType), 24) - checkEvaluation(Literal(23.toByte) + Cast(true, ByteType), 24) - checkEvaluation(Literal(23.toShort) + Cast(true, ShortType), 24) + checkEvaluation(Literal(23f) + Cast(true, FloatType), 24f) + checkEvaluation(Literal(BigDecimal(23)) + Cast(true, DecimalType), 24: BigDecimal) + checkEvaluation(Literal(23.toByte) + Cast(true, ByteType), 24.toByte) + checkEvaluation(Literal(23.toShort) + Cast(true, ShortType), 24.toShort) intercept[Exception] {evaluate(Literal(1) cast BinaryType, null)} @@ -391,21 +400,21 @@ class ExpressionEvaluationSuite extends FunSuite { val typeMap = MapType(StringType, StringType) val typeArray = ArrayType(StringType) - checkEvaluation(GetItem(BoundReference(3, AttributeReference("c", typeMap)()), + checkEvaluation(GetItem(BoundReference(3, typeMap, true), Literal("aa")), "bb", row) checkEvaluation(GetItem(Literal(null, typeMap), Literal("aa")), null, row) checkEvaluation(GetItem(Literal(null, typeMap), Literal(null, StringType)), null, row) - checkEvaluation(GetItem(BoundReference(3, AttributeReference("c", typeMap)()), + checkEvaluation(GetItem(BoundReference(3, typeMap, true), Literal(null, StringType)), null, row) - checkEvaluation(GetItem(BoundReference(4, AttributeReference("c", typeArray)()), + checkEvaluation(GetItem(BoundReference(4, typeArray, true), Literal(1)), "bb", row) checkEvaluation(GetItem(Literal(null, typeArray), Literal(1)), null, row) checkEvaluation(GetItem(Literal(null, typeArray), Literal(null, IntegerType)), null, row) - 
checkEvaluation(GetItem(BoundReference(4, AttributeReference("c", typeArray)()), + checkEvaluation(GetItem(BoundReference(4, typeArray, true), Literal(null, IntegerType)), null, row) - checkEvaluation(GetField(BoundReference(2, AttributeReference("c", typeS)()), "a"), "aa", row) + checkEvaluation(GetField(BoundReference(2, typeS, nullable = true), "a"), "aa", row) checkEvaluation(GetField(Literal(null, typeS), "a"), null, row) val typeS_notNullable = StructType( @@ -413,10 +422,8 @@ class ExpressionEvaluationSuite extends FunSuite { :: StructField("b", StringType, nullable = false) :: Nil ) - assert(GetField(BoundReference(2, - AttributeReference("c", typeS)()), "a").nullable === true) - assert(GetField(BoundReference(2, - AttributeReference("c", typeS_notNullable, nullable = false)()), "a").nullable === false) + assert(GetField(BoundReference(2,typeS, nullable = true), "a").nullable === true) + assert(GetField(BoundReference(2, typeS_notNullable, nullable = false), "a").nullable === false) assert(GetField(Literal(null, typeS), "a").nullable === true) assert(GetField(Literal(null, typeS_notNullable), "a").nullable === true) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedEvaluationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedEvaluationSuite.scala new file mode 100644 index 0000000000000..245a2e148030c --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedEvaluationSuite.scala @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.dsl.expressions._ +import org.apache.spark.sql.catalyst.expressions.codegen._ + +/** + * Overrides our expression evaluation tests to use code generation for evaluation. 
+ */ +class GeneratedEvaluationSuite extends ExpressionEvaluationSuite { + override def checkEvaluation( + expression: Expression, + expected: Any, + inputRow: Row = EmptyRow): Unit = { + val plan = try { + GenerateMutableProjection(Alias(expression, s"Optimized($expression)")() :: Nil)() + } catch { + case e: Throwable => + val evaluated = GenerateProjection.expressionEvaluator(expression) + fail( + s""" + |Code generation of $expression failed: + |${evaluated.code.mkString("\n")} + |$e + """.stripMargin) + } + + val actual = plan(inputRow).apply(0) + if(actual != expected) { + val input = if(inputRow == EmptyRow) "" else s", input: $inputRow" + fail(s"Incorrect Evaluation: $expression, actual: $actual, expected: $expected$input") + } + } + + + test("multithreaded eval") { + import scala.concurrent._ + import ExecutionContext.Implicits.global + import scala.concurrent.duration._ + + val futures = (1 to 20).map { _ => + future { + GeneratePredicate(EqualTo(Literal(1), Literal(1))) + GenerateProjection(EqualTo(Literal(1), Literal(1)) :: Nil) + GenerateMutableProjection(EqualTo(Literal(1), Literal(1)) :: Nil) + GenerateOrdering(Add(Literal(1), Literal(1)).asc :: Nil) + } + } + + futures.foreach(Await.result(_, 10.seconds)) + } +} diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedMutableEvaluationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedMutableEvaluationSuite.scala new file mode 100644 index 0000000000000..887aabb1d5fb4 --- /dev/null +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/GeneratedMutableEvaluationSuite.scala @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.catalyst.optimizer + +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.dsl.expressions._ +import org.apache.spark.sql.catalyst.expressions.codegen._ + +/** + * Overrides our expression evaluation tests to use generated code on mutable rows. 
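Both generated suites override a checkEvaluation helper inherited from ExpressionEvaluationSuite; the base implementation sits outside these hunks. A minimal sketch of the shape such a helper could take, assuming it evaluates the expression directly (no codegen) against the supplied row:

    // Sketch only: the real base-class helper is not shown in this patch.
    def checkEvaluation(expression: Expression, expected: Any, inputRow: Row = EmptyRow): Unit = {
      val actual = expression.eval(inputRow)
      if (actual != expected) {
        val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
        fail(s"Incorrect evaluation: $expression, actual: $actual, expected: $expected$input")
      }
    }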
+ */ +class GeneratedMutableEvaluationSuite extends ExpressionEvaluationSuite { + override def checkEvaluation( + expression: Expression, + expected: Any, + inputRow: Row = EmptyRow): Unit = { + lazy val evaluated = GenerateProjection.expressionEvaluator(expression) + + val plan = try { + GenerateProjection(Alias(expression, s"Optimized($expression)")() :: Nil) + } catch { + case e: Throwable => + fail( + s""" + |Code generation of $expression failed: + |${evaluated.code.mkString("\n")} + |$e + """.stripMargin) + } + + val actual = plan(inputRow) + val expectedRow = new GenericRow(Array[Any](expected)) + if (actual.hashCode() != expectedRow.hashCode()) { + fail( + s""" + |Mismatched hashCodes for values: $actual, $expectedRow + |Hash Codes: ${actual.hashCode()} != ${expectedRow.hashCode()} + |${evaluated.code.mkString("\n")} + """.stripMargin) + } + if (actual != expectedRow) { + val input = if (inputRow == EmptyRow) "" else s", input: $inputRow" + fail(s"Incorrect Evaluation: $expression, actual: $actual, expected: $expected$input") + } + } +} diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombiningLimitsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombiningLimitsSuite.scala index 4896f1b955f01..e2ae0d25db1a5 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombiningLimitsSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombiningLimitsSuite.scala @@ -27,9 +27,9 @@ class CombiningLimitsSuite extends PlanTest { object Optimize extends RuleExecutor[LogicalPlan] { val batches = - Batch("Combine Limit", FixedPoint(2), + Batch("Combine Limit", FixedPoint(10), CombineLimits) :: - Batch("Constant Folding", FixedPoint(3), + Batch("Constant Folding", FixedPoint(10), NullPropagation, ConstantFolding, BooleanSimplification) :: Nil diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala index 5d85a0fd4eebb..2d407077be303 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala @@ -24,8 +24,11 @@ import scala.collection.JavaConverters._ object SQLConf { val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed" val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold" - val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions" val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes" + val AUTO_CONVERT_JOIN_SIZE = "spark.sql.auto.convert.join.size" + val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions" + val JOIN_BROADCAST_TABLES = "spark.sql.join.broadcastTables" + val CODEGEN_ENABLED = "spark.sql.codegen" object Deprecated { val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks" @@ -56,6 +59,18 @@ trait SQLConf { /** Number of partitions to use for shuffle operators. */ private[spark] def numShufflePartitions: Int = get(SHUFFLE_PARTITIONS, "200").toInt + /** + * When set to true, Spark SQL will use the Scala compiler at runtime to generate custom bytecode + * that evaluates expressions found in queries. In general this custom code runs much faster + * than interpreted evaluation, but there are significant start-up costs due to compilation. + * As a result codegen is only beneficial when queries run for a long time, or when the same + * expressions are used multiple times. + * + * Defaults to false as this feature is currently experimental.
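Concretely, enabling the experimental path is just a string-valued setting. A usage sketch, assuming SQLConf's string-based set(key, value) method:

    import org.apache.spark.SparkContext
    import org.apache.spark.sql.SQLContext

    val sqlContext = new SQLContext(new SparkContext("local", "codegen-demo"))
    // CODEGEN_ENABLED maps to "spark.sql.codegen" and defaults to "false".
    sqlContext.set("spark.sql.codegen", "true")
    // Mainly pays off for long-running queries or heavily reused expressions.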
+ */ + private[spark] def codegenEnabled: Boolean = + if (get(CODEGEN_ENABLED, "false") == "true") true else false + /** * Upper bound on the sizes (in bytes) of the tables qualified for the auto conversion to * a broadcast value during the physical executions of join operations. Setting this to -1 @@ -111,5 +126,5 @@ trait SQLConf { private[spark] def clear() { settings.clear() } - } + diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index c2bdef732372c..e4b6810180994 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -94,7 +94,7 @@ class SQLContext(@transient val sparkContext: SparkContext) * @group userf */ def parquetFile(path: String): SchemaRDD = - new SchemaRDD(this, parquet.ParquetRelation(path, Some(sparkContext.hadoopConfiguration))) + new SchemaRDD(this, parquet.ParquetRelation(path, Some(sparkContext.hadoopConfiguration), this)) /** * Loads a JSON file (one object per line), returning the result as a [[SchemaRDD]]. @@ -160,7 +160,8 @@ class SQLContext(@transient val sparkContext: SparkContext) conf: Configuration = new Configuration()): SchemaRDD = { new SchemaRDD( this, - ParquetRelation.createEmpty(path, ScalaReflection.attributesFor[A], allowExisting, conf)) + ParquetRelation.createEmpty( + path, ScalaReflection.attributesFor[A], allowExisting, conf, this)) } /** @@ -228,12 +229,14 @@ class SQLContext(@transient val sparkContext: SparkContext) val sqlContext: SQLContext = self + def codegenEnabled = self.codegenEnabled + def numPartitions = self.numShufflePartitions val strategies: Seq[Strategy] = CommandStrategy(self) :: TakeOrdered :: - PartialAggregation :: + HashAggregation :: LeftSemiJoin :: HashJoin :: InMemoryScans :: @@ -291,27 +294,30 @@ class SQLContext(@transient val sparkContext: SparkContext) protected[sql] lazy val emptyResult = sparkContext.parallelize(Seq.empty[Row], 1) /** - * Prepares a planned SparkPlan for execution by binding references to specific ordinals, and - * inserting shuffle operations as needed. + * Prepares a planned SparkPlan for execution by inserting shuffle operations as needed. */ @transient protected[sql] val prepareForExecution = new RuleExecutor[SparkPlan] { val batches = - Batch("Add exchange", Once, AddExchange(self)) :: - Batch("Prepare Expressions", Once, new BindReferences[SparkPlan]) :: Nil + Batch("Add exchange", Once, AddExchange(self)) :: Nil } /** + * :: DeveloperApi :: * The primary workflow for executing relational queries using Spark. Designed to allow easy * access to the intermediate phases of query execution for developers. */ + @DeveloperApi protected abstract class QueryExecution { def logical: LogicalPlan lazy val analyzed = analyzer(logical) lazy val optimizedPlan = optimizer(analyzed) // TODO: Don't just pick the first one... - lazy val sparkPlan = planner(optimizedPlan).next() + lazy val sparkPlan = { + SparkPlan.currentContext.set(self) + planner(optimizedPlan).next() + } // executedPlan should not be used to initialize any SparkPlan. It should be // only used for execution. 
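The extra lines in the plan dump are easiest to see by printing a query's queryExecution. A hypothetical sketch (the table name is illustrative, not from this patch):

    import org.apache.spark.sql.test.TestSQLContext

    val rdd = TestSQLContext.sql("SELECT key FROM src")  // hypothetical table
    // The dump now ends with "Code Generation: ..." and an "== RDD ==" section
    // carrying toRdd.toDebugString.
    println(rdd.queryExecution)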
lazy val executedPlan: SparkPlan = prepareForExecution(sparkPlan) @@ -331,6 +337,9 @@ class SQLContext(@transient val sparkContext: SparkContext) |${stringOrError(optimizedPlan)} |== Physical Plan == |${stringOrError(executedPlan)} + |Code Generation: ${executedPlan.codegenEnabled} + |== RDD == + |${stringOrError(toRdd.toDebugString)} """.stripMargin.trim } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala index 806097c917b91..85726bae54911 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala @@ -72,7 +72,7 @@ class JavaSQLContext(val sqlContext: SQLContext) { conf: Configuration = new Configuration()): JavaSchemaRDD = { new JavaSchemaRDD( sqlContext, - ParquetRelation.createEmpty(path, getSchema(beanClass), allowExisting, conf)) + ParquetRelation.createEmpty(path, getSchema(beanClass), allowExisting, conf, sqlContext)) } /** @@ -101,7 +101,7 @@ class JavaSQLContext(val sqlContext: SQLContext) { def parquetFile(path: String): JavaSchemaRDD = new JavaSchemaRDD( sqlContext, - ParquetRelation(path, Some(sqlContext.sparkContext.hadoopConfiguration))) + ParquetRelation(path, Some(sqlContext.sparkContext.hadoopConfiguration), sqlContext)) /** * Loads a JSON file (one object per line), returning the result as a [[JavaSchemaRDD]]. diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Aggregate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Aggregate.scala index c1ced8bfa404a..463a1d32d7fd7 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Aggregate.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Aggregate.scala @@ -42,8 +42,8 @@ case class Aggregate( partial: Boolean, groupingExpressions: Seq[Expression], aggregateExpressions: Seq[NamedExpression], - child: SparkPlan)(@transient sqlContext: SQLContext) - extends UnaryNode with NoBind { + child: SparkPlan) + extends UnaryNode { override def requiredChildDistribution = if (partial) { @@ -56,8 +56,6 @@ case class Aggregate( } } - override def otherCopyArgs = sqlContext :: Nil - // HACK: Generators don't correctly preserve their output through serializations so we grab // out child's output attributes statically here. 
private[this] val childOutput = child.output @@ -138,7 +136,7 @@ case class Aggregate( i += 1 } } - val resultProjection = new Projection(resultExpressions, computedSchema) + val resultProjection = new InterpretedProjection(resultExpressions, computedSchema) val aggregateResults = new GenericMutableRow(computedAggregates.length) var i = 0 @@ -152,7 +150,7 @@ case class Aggregate( } else { child.execute().mapPartitions { iter => val hashTable = new HashMap[Row, Array[AggregateFunction]] - val groupingProjection = new MutableProjection(groupingExpressions, childOutput) + val groupingProjection = new InterpretedMutableProjection(groupingExpressions, childOutput) var currentRow: Row = null while (iter.hasNext) { @@ -175,7 +173,8 @@ case class Aggregate( private[this] val hashTableIter = hashTable.entrySet().iterator() private[this] val aggregateResults = new GenericMutableRow(computedAggregates.length) private[this] val resultProjection = - new MutableProjection(resultExpressions, computedSchema ++ namedGroups.map(_._2)) + new InterpretedMutableProjection( + resultExpressions, computedSchema ++ namedGroups.map(_._2)) private[this] val joinedRow = new JoinedRow override final def hasNext: Boolean = hashTableIter.hasNext diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala index 00010ef6e798a..392a7f3be3904 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala @@ -22,7 +22,7 @@ import org.apache.spark.{HashPartitioner, RangePartitioner, SparkConf} import org.apache.spark.rdd.ShuffledRDD import org.apache.spark.sql.{SQLContext, Row} import org.apache.spark.sql.catalyst.errors.attachTree -import org.apache.spark.sql.catalyst.expressions.{NoBind, MutableProjection, RowOrdering} +import org.apache.spark.sql.catalyst.expressions.RowOrdering import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.util.MutablePair @@ -31,7 +31,7 @@ import org.apache.spark.util.MutablePair * :: DeveloperApi :: */ @DeveloperApi -case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends UnaryNode with NoBind { +case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends UnaryNode { override def outputPartitioning = newPartitioning @@ -42,7 +42,9 @@ case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends Una case HashPartitioning(expressions, numPartitions) => // TODO: Eliminate redundant expressions in grouping key and value. val rdd = child.execute().mapPartitions { iter => - val hashExpressions = new MutableProjection(expressions, child.output) + @transient val hashExpressions = + newMutableProjection(expressions, child.output)() + val mutablePair = new MutablePair[Row, Row]() iter.map(r => mutablePair.update(hashExpressions(r), r)) } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala index 47b3d00262dbb..c386fd121c5de 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala @@ -47,23 +47,26 @@ case class Generate( } } - override def output = + // This must be a val since the generator output expr ids are not preserved by serialization. 
+ override val output = if (join) child.output ++ generatorOutput else generatorOutput + val boundGenerator = BindReferences.bindReference(generator, child.output) + override def execute() = { if (join) { child.execute().mapPartitions { iter => val nullValues = Seq.fill(generator.output.size)(Literal(null)) // Used to produce rows with no matches when outer = true. val outerProjection = - new Projection(child.output ++ nullValues, child.output) + newProjection(child.output ++ nullValues, child.output) val joinProjection = - new Projection(child.output ++ generator.output, child.output ++ generator.output) + newProjection(child.output ++ generator.output, child.output ++ generator.output) val joinedRow = new JoinedRow iter.flatMap {row => - val outputRows = generator.eval(row) + val outputRows = boundGenerator.eval(row) if (outer && outputRows.isEmpty) { outerProjection(row) :: Nil } else { @@ -72,7 +75,7 @@ case class Generate( } } } else { - child.execute().mapPartitions(iter => iter.flatMap(row => generator.eval(row))) + child.execute().mapPartitions(iter => iter.flatMap(row => boundGenerator.eval(row))) } } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/GeneratedAggregate.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/GeneratedAggregate.scala new file mode 100644 index 0000000000000..4a26934c49c93 --- /dev/null +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/GeneratedAggregate.scala @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution + +import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.sql.SQLContext +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.plans.physical._ +import org.apache.spark.sql.catalyst.types._ + +case class AggregateEvaluation( + schema: Seq[Attribute], + initialValues: Seq[Expression], + update: Seq[Expression], + result: Expression) + +/** + * :: DeveloperApi :: + * Alternate version of aggregation that leverages projection and thus code generation. + * Aggregations are converted into a set of projections from an aggregation buffer tuple back onto + * itself. Currently only simple aggregations like SUM, COUNT, or AVERAGE are supported. + * + * @param partial if true then aggregation is done partially on local data without shuffling to + * ensure all values where `groupingExpressions` are equal are present. + * @param groupingExpressions expressions that are evaluated to determine grouping. + * @param aggregateExpressions expressions that are computed for each group. + * @param child the input data source.
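Before the implementation below, a plain-Scala illustration (not Catalyst code) of the idea the scaladoc describes: each aggregate keeps a small buffer, and a single update projection folds every input row into it. The Coalesce(Add(expr, currentSum) :: currentSum :: Nil) construction used for SUM corresponds to leaving the buffer untouched on null input:

    // Illustrative names only; this is not the generated code.
    final case class SumBuffer(var currentSum: Long)

    def updateSum(buffer: SumBuffer, input: Option[Long]): SumBuffer = {
      // Like Coalesce(Add(expr, currentSum), currentSum): a null input leaves
      // the running sum unchanged instead of nulling it out.
      input.foreach(v => buffer.currentSum += v)
      buffer
    }

    val rows = Seq(Some(1L), None, Some(4L))
    assert(rows.foldLeft(SumBuffer(0L))(updateSum).currentSum == 5L)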
+ */ +@DeveloperApi +case class GeneratedAggregate( + partial: Boolean, + groupingExpressions: Seq[Expression], + aggregateExpressions: Seq[NamedExpression], + child: SparkPlan) + extends UnaryNode { + + override def requiredChildDistribution = + if (partial) { + UnspecifiedDistribution :: Nil + } else { + if (groupingExpressions == Nil) { + AllTuples :: Nil + } else { + ClusteredDistribution(groupingExpressions) :: Nil + } + } + + override def output = aggregateExpressions.map(_.toAttribute) + + override def execute() = { + val aggregatesToCompute = aggregateExpressions.flatMap { a => + a.collect { case agg: AggregateExpression => agg} + } + + val computeFunctions = aggregatesToCompute.map { + case c @ Count(expr) => + val currentCount = AttributeReference("currentCount", LongType, nullable = false)() + val initialValue = Literal(0L) + val updateFunction = If(IsNotNull(expr), Add(currentCount, Literal(1L)), currentCount) + val result = currentCount + + AggregateEvaluation(currentCount :: Nil, initialValue :: Nil, updateFunction :: Nil, result) + + case Sum(expr) => + val currentSum = AttributeReference("currentSum", expr.dataType, nullable = false)() + val initialValue = Cast(Literal(0L), expr.dataType) + + // Coalesce avoids double calculation... + // but really, common subexpression elimination would be better.... + val updateFunction = Coalesce(Add(expr, currentSum) :: currentSum :: Nil) + val result = currentSum + + AggregateEvaluation(currentSum :: Nil, initialValue :: Nil, updateFunction :: Nil, result) + + case a @ Average(expr) => + val currentCount = AttributeReference("currentCount", LongType, nullable = false)() + val currentSum = AttributeReference("currentSum", expr.dataType, nullable = false)() + val initialCount = Literal(0L) + val initialSum = Cast(Literal(0L), expr.dataType) + val updateCount = If(IsNotNull(expr), Add(currentCount, Literal(1L)), currentCount) + val updateSum = Coalesce(Add(expr, currentSum) :: currentSum :: Nil) + + val result = Divide(Cast(currentSum, DoubleType), Cast(currentCount, DoubleType)) + + AggregateEvaluation( + currentCount :: currentSum :: Nil, + initialCount :: initialSum :: Nil, + updateCount :: updateSum :: Nil, + result + ) + } + + val computationSchema = computeFunctions.flatMap(_.schema) + + val resultMap: Map[Long, Expression] = aggregatesToCompute.zip(computeFunctions).map { + case (agg, func) => agg.id -> func.result + }.toMap + + val namedGroups = groupingExpressions.zipWithIndex.map { + case (ne: NamedExpression, _) => (ne, ne) + case (e, i) => (e, Alias(e, s"GroupingExpr$i")()) + } + + val groupMap: Map[Expression, Attribute] = + namedGroups.map { case (k, v) => k -> v.toAttribute}.toMap + + // The set of expressions that produce the final output given the aggregation buffer and the + // grouping expressions. + val resultExpressions = aggregateExpressions.map(_.transform { + case e: Expression if resultMap.contains(e.id) => resultMap(e.id) + case e: Expression if groupMap.contains(e) => groupMap(e) + }) + + child.execute().mapPartitions { iter => + // Builds a new custom class for holding the results of aggregation for a group. + val initialValues = computeFunctions.flatMap(_.initialValues) + val newAggregationBuffer = newProjection(initialValues, child.output) + log.info(s"Initial values: ${initialValues.mkString(",")}") + + // A projection that computes the group given an input tuple.
+ val groupProjection = newProjection(groupingExpressions, child.output) + log.info(s"Grouping Projection: ${groupingExpressions.mkString(",")}") + + // A projection that is used to update the aggregate values for a group given a new tuple. + // This projection should be targeted at the current values for the group and then applied + // to a joined row of the current values with the new input row. + val updateExpressions = computeFunctions.flatMap(_.update) + val updateSchema = computeFunctions.flatMap(_.schema) ++ child.output + val updateProjection = newMutableProjection(updateExpressions, updateSchema)() + log.info(s"Update Expressions: ${updateExpressions.mkString(",")}") + + // A projection that produces the final result, given a computation. + val resultProjectionBuilder = + newMutableProjection( + resultExpressions, + (namedGroups.map(_._2.toAttribute) ++ computationSchema).toSeq) + log.info(s"Result Projection: ${resultExpressions.mkString(",")}") + + val joinedRow = new JoinedRow + + if (groupingExpressions.isEmpty) { + // TODO: Code-generating anything other than the updateProjection is probably overkill. + val buffer = newAggregationBuffer(EmptyRow).asInstanceOf[MutableRow] + var currentRow: Row = null + updateProjection.target(buffer) + + while (iter.hasNext) { + currentRow = iter.next() + updateProjection(joinedRow(buffer, currentRow)) + } + + val resultProjection = resultProjectionBuilder() + Iterator(resultProjection(buffer)) + } else { + val buffers = new java.util.HashMap[Row, MutableRow]() + + var currentRow: Row = null + while (iter.hasNext) { + currentRow = iter.next() + val currentGroup = groupProjection(currentRow) + var currentBuffer = buffers.get(currentGroup) + if (currentBuffer == null) { + currentBuffer = newAggregationBuffer(EmptyRow).asInstanceOf[MutableRow] + buffers.put(currentGroup, currentBuffer) + } + // Target the projection at the current aggregation buffer and then project the updated + // values.
+ updateProjection.target(currentBuffer)(joinedRow(currentBuffer, currentRow)) + } + + new Iterator[Row] { + private[this] val resultIterator = buffers.entrySet.iterator() + private[this] val resultProjection = resultProjectionBuilder() + + def hasNext = resultIterator.hasNext + + def next() = { + val currentGroup = resultIterator.next() + resultProjection(joinedRow(currentGroup.getKey, currentGroup.getValue)) + } + } + } + } + } +} diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala index 77c874d0315ee..21cbbc9772a00 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala @@ -18,22 +18,55 @@ package org.apache.spark.sql.execution import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.Logging import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Logging, Row, SQLContext} + + +import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.trees import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation -import org.apache.spark.sql.catalyst.expressions.GenericRow +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.plans.QueryPlan import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.plans.physical._ + +object SparkPlan { + protected[sql] val currentContext = new ThreadLocal[SQLContext]() +} + /** * :: DeveloperApi :: */ @DeveloperApi -abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging { +abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializable { self: Product => + /** + * A handle to the SQL Context that was used to create this plan. Since many operators need + * access to the sqlContext for RDD operations or configuration this field is automatically + * populated by the query planning infrastructure. + */ + @transient + protected val sqlContext = SparkPlan.currentContext.get() + + protected def sparkContext = sqlContext.sparkContext + + // sqlContext will be null when we are being deserialized on the slaves. In this instance + // the value of codegenEnabled will be set by the deserializer after the constructor has run. + val codegenEnabled: Boolean = if (sqlContext != null) { + sqlContext.codegenEnabled + } else { + false + } + + /** Overridden make copy also propagates sqlContext to the copied plan. */ + override def makeCopy(newArgs: Array[AnyRef]): this.type = { + SparkPlan.currentContext.set(sqlContext) + super.makeCopy(newArgs) + } + // TODO: Move to `DistributedPlan` /** Specifies how data is partitioned across different nodes in the cluster. */ def outputPartitioning: Partitioning = UnknownPartitioning(0) // TODO: WRONG WIDTH!
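The currentContext hand-off above is worth spelling out: the planner sets a thread-local slot before operators are constructed, and every SparkPlan constructor reads it, which is what lets the sqlContext constructor parameter (and the otherCopyArgs boilerplate) disappear from the operators in this patch. A simplified, self-contained sketch with stand-in types:

    // A String stands in for SQLContext; only the mechanics matter here.
    object PlanContext {
      val currentContext = new ThreadLocal[String]()
    }

    class Plan {
      // Captured once, at construction time, from the constructing thread's slot.
      @transient val ctx: String = PlanContext.currentContext.get()
    }

    PlanContext.currentContext.set("demo-context")
    assert(new Plan().ctx == "demo-context")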
@@ -51,8 +84,46 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging { */ def executeCollect(): Array[Row] = execute().map(_.copy()).collect() - protected def buildRow(values: Seq[Any]): Row = - new GenericRow(values.toArray) + protected def newProjection( + expressions: Seq[Expression], inputSchema: Seq[Attribute]): Projection = { + log.debug( + s"Creating Projection: $expressions, inputSchema: $inputSchema, codegen:$codegenEnabled") + if (codegenEnabled) { + GenerateProjection(expressions, inputSchema) + } else { + new InterpretedProjection(expressions, inputSchema) + } + } + + protected def newMutableProjection( + expressions: Seq[Expression], + inputSchema: Seq[Attribute]): () => MutableProjection = { + log.debug( + s"Creating MutableProj: $expressions, inputSchema: $inputSchema, codegen:$codegenEnabled") + if(codegenEnabled) { + GenerateMutableProjection(expressions, inputSchema) + } else { + () => new InterpretedMutableProjection(expressions, inputSchema) + } + } + + + protected def newPredicate( + expression: Expression, inputSchema: Seq[Attribute]): (Row) => Boolean = { + if (codegenEnabled) { + GeneratePredicate(expression, inputSchema) + } else { + InterpretedPredicate(expression, inputSchema) + } + } + + protected def newOrdering(order: Seq[SortOrder], inputSchema: Seq[Attribute]): Ordering[Row] = { + if (codegenEnabled) { + GenerateOrdering(order, inputSchema) + } else { + new RowOrdering(order, inputSchema) + } + } } /** diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala index 404d48ae05b45..5f1fe99f75c9d 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql.execution -import scala.util.Try - import org.apache.spark.sql.{SQLContext, execution} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.planning._ @@ -41,7 +39,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { // no predicate can be evaluated by matching hash keys case logical.Join(left, right, LeftSemi, condition) => execution.LeftSemiJoinBNL( - planLater(left), planLater(right), condition)(sqlContext) :: Nil + planLater(left), planLater(right), condition) :: Nil case _ => Nil } } @@ -60,6 +58,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { * will instead be used to decide the build side in a [[execution.ShuffledHashJoin]]. 
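All four factories above share one shape: consult codegenEnabled and fall back to the interpreted implementation otherwise. Distilled with illustrative names (not the real Catalyst classes):

    // By-name parameters keep the losing branch from ever being constructed.
    def pick[T](codegenEnabled: Boolean)(compiled: => T, interpreted: => T): T =
      if (codegenEnabled) compiled else interpreted

    val toUpper = pick[String => String](codegenEnabled = false)(
      compiled = sys.error("codegen path not taken"),
      interpreted = s => s.toUpperCase)
    assert(toUpper("ok") == "OK")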
*/ object HashJoin extends Strategy with PredicateHelper { + private[this] def makeBroadcastHashJoin( leftKeys: Seq[Expression], rightKeys: Seq[Expression], @@ -68,24 +67,24 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { condition: Option[Expression], side: BuildSide) = { val broadcastHashJoin = execution.BroadcastHashJoin( - leftKeys, rightKeys, side, planLater(left), planLater(right))(sqlContext) + leftKeys, rightKeys, side, planLater(left), planLater(right)) condition.map(Filter(_, broadcastHashJoin)).getOrElse(broadcastHashJoin) :: Nil } def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) - if Try(sqlContext.autoBroadcastJoinThreshold > 0 && - right.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold).getOrElse(false) => + if sqlContext.autoBroadcastJoinThreshold > 0 && + right.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold => makeBroadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildRight) case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) - if Try(sqlContext.autoBroadcastJoinThreshold > 0 && - left.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold).getOrElse(false) => + if sqlContext.autoBroadcastJoinThreshold > 0 && + left.statistics.sizeInBytes <= sqlContext.autoBroadcastJoinThreshold => makeBroadcastHashJoin(leftKeys, rightKeys, left, right, condition, BuildLeft) case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) => val buildSide = - if (Try(right.statistics.sizeInBytes <= left.statistics.sizeInBytes).getOrElse(false)) { + if (right.statistics.sizeInBytes <= left.statistics.sizeInBytes) { BuildRight } else { BuildLeft @@ -99,65 +98,65 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { } } - object PartialAggregation extends Strategy { + object HashAggregation extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { - case logical.Aggregate(groupingExpressions, aggregateExpressions, child) => - // Collect all aggregate expressions. - val allAggregates = - aggregateExpressions.flatMap(_ collect { case a: AggregateExpression => a }) - // Collect all aggregate expressions that can be computed partially. - val partialAggregates = - aggregateExpressions.flatMap(_ collect { case p: PartialAggregate => p }) - - // Only do partial aggregation if supported by all aggregate expressions. - if (allAggregates.size == partialAggregates.size) { - // Create a map of expressions to their partial evaluations for all aggregate expressions. - val partialEvaluations: Map[Long, SplitEvaluation] = - partialAggregates.map(a => (a.id, a.asPartial)).toMap - - // We need to pass all grouping expressions though so the grouping can happen a second - // time. However some of them might be unnamed so we alias them allowing them to be - // referenced in the second aggregation. - val namedGroupingExpressions: Map[Expression, NamedExpression] = groupingExpressions.map { - case n: NamedExpression => (n, n) - case other => (other, Alias(other, "PartialGroup")()) - }.toMap + // Aggregations that can be performed in two phases, before and after the shuffle. - // Replace aggregations with a new expression that computes the result from the already - // computed partial evaluations and grouping values. 
- val rewrittenAggregateExpressions = aggregateExpressions.map(_.transformUp { - case e: Expression if partialEvaluations.contains(e.id) => - partialEvaluations(e.id).finalEvaluation - case e: Expression if namedGroupingExpressions.contains(e) => - namedGroupingExpressions(e).toAttribute - }).asInstanceOf[Seq[NamedExpression]] - - val partialComputation = - (namedGroupingExpressions.values ++ - partialEvaluations.values.flatMap(_.partialEvaluations)).toSeq - - // Construct two phased aggregation. - execution.Aggregate( + // Cases where all aggregates can be codegened. + case PartialAggregation( + namedGroupingAttributes, + rewrittenAggregateExpressions, + groupingExpressions, + partialComputation, + child) + if canBeCodeGened( + allAggregates(partialComputation) ++ + allAggregates(rewrittenAggregateExpressions)) && + codegenEnabled => + execution.GeneratedAggregate( partial = false, - namedGroupingExpressions.values.map(_.toAttribute).toSeq, + namedGroupingAttributes, rewrittenAggregateExpressions, - execution.Aggregate( + execution.GeneratedAggregate( partial = true, groupingExpressions, partialComputation, - planLater(child))(sqlContext))(sqlContext) :: Nil - } else { - Nil - } + planLater(child))) :: Nil + + // Cases where some aggregate can not be codegened + case PartialAggregation( + namedGroupingAttributes, + rewrittenAggregateExpressions, + groupingExpressions, + partialComputation, + child) => + execution.Aggregate( + partial = false, + namedGroupingAttributes, + rewrittenAggregateExpressions, + execution.Aggregate( + partial = true, + groupingExpressions, + partialComputation, + planLater(child))) :: Nil + case _ => Nil } + + def canBeCodeGened(aggs: Seq[AggregateExpression]) = !aggs.exists { + case _: Sum | _: Count => false + case _ => true + } + + def allAggregates(exprs: Seq[Expression]) = + exprs.flatMap(_.collect { case a: AggregateExpression => a }) } object BroadcastNestedLoopJoin extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case logical.Join(left, right, joinType, condition) => execution.BroadcastNestedLoopJoin( - planLater(left), planLater(right), joinType, condition)(sqlContext) :: Nil + planLater(left), planLater(right), joinType, condition) :: Nil case _ => Nil } } @@ -176,16 +175,10 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { protected lazy val singleRowRdd = sparkContext.parallelize(Seq(new GenericRow(Array[Any]()): Row), 1) - def convertToCatalyst(a: Any): Any = a match { - case s: Seq[Any] => s.map(convertToCatalyst) - case p: Product => new GenericRow(p.productIterator.map(convertToCatalyst).toArray) - case other => other - } - object TakeOrdered extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case logical.Limit(IntegerLiteral(limit), logical.Sort(order, child)) => - execution.TakeOrdered(limit, order, planLater(child))(sqlContext) :: Nil + execution.TakeOrdered(limit, order, planLater(child)) :: Nil case _ => Nil } } @@ -195,11 +188,11 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { // TODO: need to support writing to other types of files. Unify the below code paths. 
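The rewritten strategy plans every supported aggregate in two phases around the shuffle, whichever physical operator it picks. A plain-Scala sketch of why this is safe for an algebraic aggregate like SUM, with each inner Seq standing in for a pre-shuffle partition:

    val partitions = Seq(
      Seq("a" -> 1, "b" -> 2, "a" -> 3),
      Seq("a" -> 4, "b" -> 5))

    // Phase 1 (partial = true): aggregate within each partition, before any shuffle.
    val partial = partitions.map(_.groupBy(_._1).map { case (k, vs) => k -> vs.map(_._2).sum })

    // Phase 2 (partial = false): merge the per-partition results by key.
    val merged = partial.flatten.groupBy(_._1).map { case (k, vs) => k -> vs.map(_._2).sum }

    assert(merged == Map("a" -> 8, "b" -> 7))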
case logical.WriteToFile(path, child) => val relation = - ParquetRelation.create(path, child, sparkContext.hadoopConfiguration) + ParquetRelation.create(path, child, sparkContext.hadoopConfiguration, sqlContext) // Note: overwrite=false because otherwise the metadata we just created will be deleted - InsertIntoParquetTable(relation, planLater(child), overwrite=false)(sqlContext) :: Nil + InsertIntoParquetTable(relation, planLater(child), overwrite = false) :: Nil case logical.InsertIntoTable(table: ParquetRelation, partition, child, overwrite) => - InsertIntoParquetTable(table, planLater(child), overwrite)(sqlContext) :: Nil + InsertIntoParquetTable(table, planLater(child), overwrite) :: Nil case PhysicalOperation(projectList, filters: Seq[Expression], relation: ParquetRelation) => val prunePushedDownFilters = if (sparkContext.conf.getBoolean(ParquetFilters.PARQUET_FILTER_PUSHDOWN_ENABLED, true)) { @@ -228,7 +221,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { projectList, filters, prunePushedDownFilters, - ParquetTableScan(_, relation, filters)(sqlContext)) :: Nil + ParquetTableScan(_, relation, filters)) :: Nil case _ => Nil } @@ -266,20 +259,19 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] { case logical.Filter(condition, child) => execution.Filter(condition, planLater(child)) :: Nil case logical.Aggregate(group, agg, child) => - execution.Aggregate(partial = false, group, agg, planLater(child))(sqlContext) :: Nil + execution.Aggregate(partial = false, group, agg, planLater(child)) :: Nil case logical.Sample(fraction, withReplacement, seed, child) => execution.Sample(fraction, withReplacement, seed, planLater(child)) :: Nil case logical.LocalRelation(output, data) => - val dataAsRdd = - sparkContext.parallelize(data.map(r => - new GenericRow(r.productIterator.map(convertToCatalyst).toArray): Row)) - execution.ExistingRdd(output, dataAsRdd) :: Nil + ExistingRdd( + output, + ExistingRdd.productToRowRdd(sparkContext.parallelize(data, numPartitions))) :: Nil case logical.Limit(IntegerLiteral(limit), child) => - execution.Limit(limit, planLater(child))(sqlContext) :: Nil + execution.Limit(limit, planLater(child)) :: Nil case Unions(unionChildren) => - execution.Union(unionChildren.map(planLater))(sqlContext) :: Nil - case logical.Except(left,right) => - execution.Except(planLater(left),planLater(right)) :: Nil + execution.Union(unionChildren.map(planLater)) :: Nil + case logical.Except(left, right) => + execution.Except(planLater(left), planLater(right)) :: Nil case logical.Intersect(left, right) => execution.Intersect(planLater(left), planLater(right)) :: Nil case logical.Generate(generator, join, outer, _, child) => diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala index 966d8f95fc83c..174eda8f1a72c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala @@ -37,9 +37,11 @@ import org.apache.spark.util.MutablePair case class Project(projectList: Seq[NamedExpression], child: SparkPlan) extends UnaryNode { override def output = projectList.map(_.toAttribute) - override def execute() = child.execute().mapPartitions { iter => - @transient val reusableProjection = new MutableProjection(projectList) - iter.map(reusableProjection) + @transient lazy val buildProjection = newMutableProjection(projectList, 
child.output) + + def execute() = child.execute().mapPartitions { iter => + val reusableProjection = buildProjection() + iter.map(reusableProjection) } } @@ -50,8 +52,10 @@ case class Project(projectList: Seq[NamedExpression], child: SparkPlan) extends case class Filter(condition: Expression, child: SparkPlan) extends UnaryNode { override def output = child.output - override def execute() = child.execute().mapPartitions { iter => - iter.filter(condition.eval(_).asInstanceOf[Boolean]) + @transient lazy val conditionEvaluator = newPredicate(condition, child.output) + + def execute() = child.execute().mapPartitions { iter => + iter.filter(conditionEvaluator) } } @@ -72,12 +76,10 @@ case class Sample(fraction: Double, withReplacement: Boolean, seed: Long, child: * :: DeveloperApi :: */ @DeveloperApi -case class Union(children: Seq[SparkPlan])(@transient sqlContext: SQLContext) extends SparkPlan { +case class Union(children: Seq[SparkPlan]) extends SparkPlan { // TODO: attributes output by union should be distinct for nullability purposes override def output = children.head.output - override def execute() = sqlContext.sparkContext.union(children.map(_.execute())) - - override def otherCopyArgs = sqlContext :: Nil + override def execute() = sparkContext.union(children.map(_.execute())) } /** @@ -89,13 +91,11 @@ case class Union(children: Seq[SparkPlan])(@transient sqlContext: SQLContext) ex * repartition all the data to a single partition to compute the global limit. */ @DeveloperApi -case class Limit(limit: Int, child: SparkPlan)(@transient sqlContext: SQLContext) +case class Limit(limit: Int, child: SparkPlan) extends UnaryNode { // TODO: Implement a partition local limit, and use a strategy to generate the proper limit plan: // partition local limit -> exchange into one partition -> partition local limit again - override def otherCopyArgs = sqlContext :: Nil - override def output = child.output /** @@ -161,20 +161,18 @@ case class Limit(limit: Int, child: SparkPlan)(@transient sqlContext: SQLContext * Spark's top operator does the opposite in ordering so we name it TakeOrdered to avoid confusion. */ @DeveloperApi -case class TakeOrdered(limit: Int, sortOrder: Seq[SortOrder], child: SparkPlan) - (@transient sqlContext: SQLContext) extends UnaryNode { - override def otherCopyArgs = sqlContext :: Nil +case class TakeOrdered(limit: Int, sortOrder: Seq[SortOrder], child: SparkPlan) extends UnaryNode { override def output = child.output - @transient - lazy val ordering = new RowOrdering(sortOrder) + val ordering = new RowOrdering(sortOrder, child.output) + // TODO: Is this copying for no reason? override def executeCollect() = child.execute().map(_.copy()).takeOrdered(limit)(ordering) // TODO: Terminal split should be implemented differently from non-terminal split. // TODO: Pick num splits based on |limit|. - override def execute() = sqlContext.sparkContext.makeRDD(executeCollect(), 1) + override def execute() = sparkContext.makeRDD(executeCollect(), 1) } /** @@ -189,15 +187,13 @@ case class Sort( override def requiredChildDistribution = if (global) OrderedDistribution(sortOrder) :: Nil else UnspecifiedDistribution :: Nil - @transient - lazy val ordering = new RowOrdering(sortOrder) override def execute() = attachTree(this, "sort") { - // TODO: Optimize sorting operation?
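TakeOrdered.executeCollect now leans directly on the RDD primitive of the same name; takeOrdered(k)(ord) returns the k smallest elements under ord, which is why a LIMIT over an ORDER BY collapses into it. For reference:

    import org.apache.spark.SparkContext

    val sc = new SparkContext("local", "take-ordered-demo")
    // The three smallest values under the standard Int ordering.
    val smallest = sc.parallelize(Seq(5, 1, 4, 2, 3)).takeOrdered(3)(Ordering.Int)
    assert(smallest.sameElements(Array(1, 2, 3)))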
child.execute() - .mapPartitions( - iterator => iterator.map(_.copy()).toArray.sorted(ordering).iterator, - preservesPartitioning = true) + .mapPartitions( { iterator => + val ordering = newOrdering(sortOrder, child.output) + iterator.map(_.copy()).toArray.sorted(ordering).iterator + }, preservesPartitioning = true) } override def output = child.output diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala index c6fbd6d2f6930..5ef46c32d44bc 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala @@ -41,13 +41,13 @@ package object debug { */ @DeveloperApi implicit class DebugQuery(query: SchemaRDD) { - def debug(implicit sc: SparkContext): Unit = { + def debug(): Unit = { val plan = query.queryExecution.executedPlan val visited = new collection.mutable.HashSet[Long]() val debugPlan = plan transform { case s: SparkPlan if !visited.contains(s.id) => visited += s.id - DebugNode(sc, s) + DebugNode(s) } println(s"Results returned: ${debugPlan.execute().count()}") debugPlan.foreach { @@ -57,9 +57,7 @@ package object debug { } } - private[sql] case class DebugNode( - @transient sparkContext: SparkContext, - child: SparkPlan) extends UnaryNode { + private[sql] case class DebugNode(child: SparkPlan) extends UnaryNode { def references = Set.empty def output = child.output diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala index 7d1f11caae838..2750ddbce896f 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala @@ -38,6 +38,8 @@ case object BuildLeft extends BuildSide case object BuildRight extends BuildSide trait HashJoin { + self: SparkPlan => + val leftKeys: Seq[Expression] val rightKeys: Seq[Expression] val buildSide: BuildSide @@ -56,9 +58,9 @@ trait HashJoin { def output = left.output ++ right.output - @transient lazy val buildSideKeyGenerator = new Projection(buildKeys, buildPlan.output) + @transient lazy val buildSideKeyGenerator = newProjection(buildKeys, buildPlan.output) @transient lazy val streamSideKeyGenerator = - () => new MutableProjection(streamedKeys, streamedPlan.output) + newMutableProjection(streamedKeys, streamedPlan.output) def joinIterators(buildIter: Iterator[Row], streamIter: Iterator[Row]): Iterator[Row] = { // TODO: Use Spark's HashMap implementation. @@ -217,9 +219,8 @@ case class BroadcastHashJoin( rightKeys: Seq[Expression], buildSide: BuildSide, left: SparkPlan, - right: SparkPlan)(@transient sqlContext: SQLContext) extends BinaryNode with HashJoin { + right: SparkPlan) extends BinaryNode with HashJoin { - override def otherCopyArgs = sqlContext :: Nil override def outputPartitioning: Partitioning = left.outputPartitioning @@ -228,7 +229,7 @@ case class BroadcastHashJoin( @transient lazy val broadcastFuture = future { - sqlContext.sparkContext.broadcast(buildPlan.executeCollect()) + sparkContext.broadcast(buildPlan.executeCollect()) } def execute() = { @@ -248,14 +249,11 @@ case class BroadcastHashJoin( @DeveloperApi case class LeftSemiJoinBNL( streamed: SparkPlan, broadcast: SparkPlan, condition: Option[Expression]) - (@transient sqlContext: SQLContext) extends BinaryNode { // TODO: Override requiredChildDistribution. 
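With the implicit SparkContext argument gone, debug() is a plain no-argument call on any SchemaRDD once the package implicits are imported. A usage sketch (the query is hypothetical):

    import org.apache.spark.sql.execution.debug._  // brings DebugQuery into scope
    import org.apache.spark.sql.test.TestSQLContext

    val query = TestSQLContext.sql("SELECT key FROM src")  // hypothetical table
    query.debug()  // wraps each operator in a DebugNode and prints per-operator row counts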
override def outputPartitioning: Partitioning = streamed.outputPartitioning - override def otherCopyArgs = sqlContext :: Nil - def output = left.output /** The Streamed Relation */ @@ -271,7 +269,7 @@ case class LeftSemiJoinBNL( def execute() = { val broadcastedRelation = - sqlContext.sparkContext.broadcast(broadcast.execute().map(_.copy()).collect().toIndexedSeq) + sparkContext.broadcast(broadcast.execute().map(_.copy()).collect().toIndexedSeq) streamed.execute().mapPartitions { streamedIter => val joinedRow = new JoinedRow @@ -300,8 +298,14 @@ case class LeftSemiJoinBNL( case class CartesianProduct(left: SparkPlan, right: SparkPlan) extends BinaryNode { def output = left.output ++ right.output - def execute() = left.execute().map(_.copy()).cartesian(right.execute().map(_.copy())).map { - case (l: Row, r: Row) => buildRow(l ++ r) + def execute() = { + val leftResults = left.execute().map(_.copy()) + val rightResults = right.execute().map(_.copy()) + + leftResults.cartesian(rightResults).mapPartitions { iter => + val joinedRow = new JoinedRow + iter.map(r => joinedRow(r._1, r._2)) + } } } @@ -311,14 +315,11 @@ case class CartesianProduct(left: SparkPlan, right: SparkPlan) extends BinaryNod @DeveloperApi case class BroadcastNestedLoopJoin( streamed: SparkPlan, broadcast: SparkPlan, joinType: JoinType, condition: Option[Expression]) - (@transient sqlContext: SQLContext) extends BinaryNode { // TODO: Override requiredChildDistribution. override def outputPartitioning: Partitioning = streamed.outputPartitioning - override def otherCopyArgs = sqlContext :: Nil - override def output = { joinType match { case LeftOuter => @@ -345,13 +346,14 @@ case class BroadcastNestedLoopJoin( def execute() = { val broadcastedRelation = - sqlContext.sparkContext.broadcast(broadcast.execute().map(_.copy()).collect().toIndexedSeq) + sparkContext.broadcast(broadcast.execute().map(_.copy()).collect().toIndexedSeq) val streamedPlusMatches = streamed.execute().mapPartitions { streamedIter => val matchedRows = new ArrayBuffer[Row] // TODO: Use Spark's BitSet. val includedBroadcastTuples = new BitSet(broadcastedRelation.value.size) val joinedRow = new JoinedRow + val rightNulls = new GenericMutableRow(right.output.size) streamedIter.foreach { streamedRow => var i = 0 @@ -361,7 +363,7 @@ case class BroadcastNestedLoopJoin( // TODO: One bitset per partition instead of per row. val broadcastedRow = broadcastedRelation.value(i) if (boundCondition(joinedRow(streamedRow, broadcastedRow))) { - matchedRows += buildRow(streamedRow ++ broadcastedRow) + matchedRows += joinedRow(streamedRow, broadcastedRow).copy() matched = true includedBroadcastTuples += i } @@ -369,7 +371,7 @@ case class BroadcastNestedLoopJoin( } if (!matched && (joinType == LeftOuter || joinType == FullOuter)) { - matchedRows += buildRow(streamedRow ++ Array.fill(right.output.size)(null)) + matchedRows += joinedRow(streamedRow, rightNulls).copy() } } Iterator((matchedRows, includedBroadcastTuples)) @@ -383,20 +385,20 @@ case class BroadcastNestedLoopJoin( streamedPlusMatches.map(_._2).reduce(_ ++ _) } + val leftNulls = new GenericMutableRow(left.output.size) val rightOuterMatches: Seq[Row] = if (joinType == RightOuter || joinType == FullOuter) { broadcastedRelation.value.zipWithIndex.filter { case (row, i) => !allIncludedBroadcastTuples.contains(i) }.map { - // TODO: Use projection. - case (row, _) => buildRow(Vector.fill(left.output.size)(null) ++ row) + case (row, _) => new JoinedRow(leftNulls, row) } } else { Vector() } // TODO: Breaks lineage. 
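The switch from buildRow(streamedRow ++ broadcastedRow) to JoinedRow avoids materializing a concatenated row: JoinedRow is a view over two rows that routes index lookups to one side or the other. A simplified stand-in (not the Catalyst class) showing the indexing trick:

    class Joined[A](left: Seq[A], right: Seq[A]) {
      def apply(i: Int): A =
        if (i < left.length) left(i) else right(i - left.length)
      def length: Int = left.length + right.length
    }

    val row = new Joined(Seq("x", "y"), Seq(1, 2))
    assert(row(0) == "x" && row(2) == 1 && row.length == 4)

Note the operators above still call .copy() before buffering a JoinedRow, since the view may be backed by reused, mutable rows.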
- sqlContext.sparkContext.union( - streamedPlusMatches.flatMap(_._1), sqlContext.sparkContext.makeRDD(rightOuterMatches)) + sparkContext.union( + streamedPlusMatches.flatMap(_._1), sparkContext.makeRDD(rightOuterMatches)) } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala index 8c7dbd5eb4a09..b3bae5db0edbc 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala @@ -46,7 +46,8 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, LeafNode} */ private[sql] case class ParquetRelation( path: String, - @transient conf: Option[Configuration] = None) + @transient conf: Option[Configuration], + @transient sqlContext: SQLContext) extends LeafNode with MultiInstanceRelation { self: Product => @@ -61,7 +62,7 @@ private[sql] case class ParquetRelation( /** Attributes */ override val output = ParquetTypesConverter.readSchemaFromFile(new Path(path), conf) - override def newInstance = ParquetRelation(path).asInstanceOf[this.type] + override def newInstance = ParquetRelation(path, conf, sqlContext).asInstanceOf[this.type] // Equals must also take into account the output attributes so that we can distinguish between // different instances of the same relation, @@ -70,6 +71,9 @@ private[sql] case class ParquetRelation( p.path == path && p.output == output case _ => false } + + // TODO: Use data from the footers. + override lazy val statistics = Statistics(sizeInBytes = sqlContext.defaultSizeInBytes) } private[sql] object ParquetRelation { @@ -106,13 +110,14 @@ private[sql] object ParquetRelation { */ def create(pathString: String, child: LogicalPlan, - conf: Configuration): ParquetRelation = { + conf: Configuration, + sqlContext: SQLContext): ParquetRelation = { if (!child.resolved) { throw new UnresolvedException[LogicalPlan]( child, "Attempt to create Parquet table from unresolved child (when schema is not available)") } - createEmpty(pathString, child.output, false, conf) + createEmpty(pathString, child.output, false, conf, sqlContext) } /** @@ -127,14 +132,15 @@ private[sql] object ParquetRelation { def createEmpty(pathString: String, attributes: Seq[Attribute], allowExisting: Boolean, - conf: Configuration): ParquetRelation = { + conf: Configuration, + sqlContext: SQLContext): ParquetRelation = { val path = checkPath(pathString, allowExisting, conf) if (conf.get(ParquetOutputFormat.COMPRESSION) == null) { conf.set(ParquetOutputFormat.COMPRESSION, ParquetRelation.defaultCompression.name()) } ParquetRelation.enableLogForwarding() ParquetTypesConverter.writeMetaData(attributes, path, conf) - new ParquetRelation(path.toString, Some(conf)) { + new ParquetRelation(path.toString, Some(conf), sqlContext) { override val output = attributes } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala index ea74320d06c86..912a9f002b7d1 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala @@ -55,8 +55,7 @@ case class ParquetTableScan( // https://issues.apache.org/jira/browse/SPARK-1367 output: Seq[Attribute], relation: ParquetRelation, - columnPruningPred: Seq[Expression])( - @transient val sqlContext: SQLContext) + 
columnPruningPred: Seq[Expression]) extends LeafNode { override def execute(): RDD[Row] = { @@ -99,8 +98,6 @@ case class ParquetTableScan( .filter(_ != null) // Parquet's record filters may produce null values } - override def otherCopyArgs = sqlContext :: Nil - /** * Applies a (candidate) projection. * @@ -110,7 +107,7 @@ case class ParquetTableScan( def pruneColumns(prunedAttributes: Seq[Attribute]): ParquetTableScan = { val success = validateProjection(prunedAttributes) if (success) { - ParquetTableScan(prunedAttributes, relation, columnPruningPred)(sqlContext) + ParquetTableScan(prunedAttributes, relation, columnPruningPred) } else { sys.error("Warning: Could not validate Parquet schema projection in pruneColumns") this @@ -150,8 +147,7 @@ case class ParquetTableScan( case class InsertIntoParquetTable( relation: ParquetRelation, child: SparkPlan, - overwrite: Boolean = false)( - @transient val sqlContext: SQLContext) + overwrite: Boolean = false) extends UnaryNode with SparkHadoopMapReduceUtil { /** @@ -171,7 +167,7 @@ case class InsertIntoParquetTable( val writeSupport = if (child.output.map(_.dataType).forall(_.isPrimitive)) { - logger.debug("Initializing MutableRowWriteSupport") + log.debug("Initializing MutableRowWriteSupport") classOf[org.apache.spark.sql.parquet.MutableRowWriteSupport] } else { classOf[org.apache.spark.sql.parquet.RowWriteSupport] @@ -203,8 +199,6 @@ case class InsertIntoParquetTable( override def output = child.output - override def otherCopyArgs = sqlContext :: Nil - /** * Stores the given Row RDD as a Hadoop file. * diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala index d4599da711254..837ea7695dbb3 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala @@ -22,6 +22,7 @@ import java.io.File import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileStatus, FileSystem, Path} import org.apache.hadoop.mapreduce.Job +import org.apache.spark.sql.test.TestSQLContext import parquet.example.data.{GroupWriter, Group} import parquet.example.data.simple.SimpleGroup @@ -103,7 +104,7 @@ private[sql] object ParquetTestData { val testDir = Utils.createTempDir() val testFilterDir = Utils.createTempDir() - lazy val testData = new ParquetRelation(testDir.toURI.toString) + lazy val testData = new ParquetRelation(testDir.toURI.toString, None, TestSQLContext) val testNestedSchema1 = // based on blogpost example, source: @@ -202,8 +203,10 @@ private[sql] object ParquetTestData { val testNestedDir3 = Utils.createTempDir() val testNestedDir4 = Utils.createTempDir() - lazy val testNestedData1 = new ParquetRelation(testNestedDir1.toURI.toString) - lazy val testNestedData2 = new ParquetRelation(testNestedDir2.toURI.toString) + lazy val testNestedData1 = + new ParquetRelation(testNestedDir1.toURI.toString, None, TestSQLContext) + lazy val testNestedData2 = + new ParquetRelation(testNestedDir2.toURI.toString, None, TestSQLContext) def writeFile() = { testDir.delete() diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala index 8e1e1971d968b..1fd8d27b34c59 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala @@ -45,6 +45,7 @@ class QueryTest extends PlanTest { 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index 8e1e1971d968b..1fd8d27b34c59 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -45,6 +45,7 @@ class QueryTest extends PlanTest {
           |${rdd.queryExecution}
           |== Exception ==
           |$e
+          |${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
         """.stripMargin)
     }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
index 215618e852eb2..76b1724471442 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
@@ -39,22 +39,22 @@ class PlannerSuite extends FunSuite {
 
   test("count is partially aggregated") {
     val query = testData.groupBy('value)(Count('key)).queryExecution.analyzed
-    val planned = PartialAggregation(query).head
-    val aggregations = planned.collect { case a: Aggregate => a }
+    val planned = HashAggregation(query).head
+    val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n }
 
     assert(aggregations.size === 2)
   }
 
   test("count distinct is not partially aggregated") {
     val query = testData.groupBy('value)(CountDistinct('key :: Nil)).queryExecution.analyzed
-    val planned = PartialAggregation(query)
+    val planned = HashAggregation(query)
     assert(planned.isEmpty)
   }
 
   test("mixed aggregates are not partially aggregated") {
     val query =
       testData.groupBy('value)(Count('value), CountDistinct('key :: Nil)).queryExecution.analyzed
-    val planned = PartialAggregation(query)
+    val planned = HashAggregation(query)
     assert(planned.isEmpty)
   }
 }
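The updated `PlannerSuite` assertions are easier to follow with the expected plan shape in mind: hash aggregation plans a map-side partial `Aggregate` that feeds a final `Aggregate`, so collecting nodes whose `nodeName` contains "Aggregate" finds exactly two. A toy stand-in for the plan tree (not Catalyst's `TreeNode`) that makes the count concrete:

    // Toy tree; only the collect-by-nodeName idiom mirrors the test above.
    sealed abstract class Node(val nodeName: String, val children: Seq[Node]) {
      def collect[A](pf: PartialFunction[Node, A]): Seq[A] =
        (if (pf.isDefinedAt(this)) Seq(pf(this)) else Nil) ++
          children.flatMap(_.collect(pf))
    }
    case class Scan() extends Node("Scan", Nil)
    case class Aggregate(partial: Boolean, child: Node)
      extends Node("Aggregate", Seq(child))

    // A partial (map-side) aggregate feeding the final aggregate:
    val planned = Aggregate(partial = false, Aggregate(partial = true, Scan()))
    val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n }
    assert(aggregations.size == 2)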
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/TgfSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/TgfSuite.scala
index e55648b8ed15a..2cab5e0c44d92 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/TgfSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/TgfSuite.scala
@@ -33,7 +33,7 @@ import org.apache.spark.sql.test.TestSQLContext._
 * Note: this is only a rough example of how TGFs can be expressed, the final version will likely
 * involve a lot more sugar for cleaner use in Scala/Java/etc.
 */
-case class ExampleTGF(input: Seq[Attribute] = Seq('name, 'age)) extends Generator {
+case class ExampleTGF(input: Seq[Expression] = Seq('name, 'age)) extends Generator {
   def children = input
   protected def makeOutput() = 'nameAndAge.string :: Nil
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
index 3c911e9a4e7b1..561f5b4a49965 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
@@ -25,6 +25,7 @@ import parquet.schema.MessageTypeParser
 import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.mapreduce.Job
+
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.{SqlLexical, SqlParser}
 
@@ -32,6 +33,7 @@ import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedAttribute}
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types.{BooleanType, IntegerType}
 import org.apache.spark.sql.catalyst.util.getTempFilePath
+import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.test.TestSQLContext
 import org.apache.spark.sql.test.TestSQLContext._
 import org.apache.spark.util.Utils
@@ -207,10 +209,11 @@ class ParquetQuerySuite extends QueryTest with FunSuiteLike with BeforeAndAfterA
   }
 
   test("Projection of simple Parquet file") {
+    SparkPlan.currentContext.set(TestSQLContext)
     val scanner = new ParquetTableScan(
       ParquetTestData.testData.output,
       ParquetTestData.testData,
-      Seq())(TestSQLContext)
+      Seq())
     val projected = scanner.pruneColumns(ParquetTypesConverter
       .convertToAttributes(MessageTypeParser
       .parseMessageType(ParquetTestData.subTestSchema)))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 84d43eaeea51d..f0a61270daf05 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -231,7 +231,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
       HiveTableScans,
       DataSinks,
       Scripts,
-      PartialAggregation,
+      HashAggregation,
       LeftSemiJoin,
       HashJoin,
       BasicOperators,
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index c2b0b00aa5852..39033bdeac4b0 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -131,7 +131,7 @@ case class InsertIntoHiveTable(
       conf,
       SparkHiveHadoopWriter.createPathFromString(fileSinkConf.getDirName, conf))
 
-    logger.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
+    log.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
 
     val writer = new SparkHiveHadoopWriter(conf, fileSinkConf)
     writer.preSetup()
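For context on the strategy list above: as the commit message describes, the planner now consults physical size estimates to turn an equi-join into a broadcast join when the build side is small enough. The decision itself reduces to one comparison; in this sketch `Stats` and `autoConvertJoinSize` are placeholder names, not the actual SQLConf field:

    // Placeholder names; only the thresholding idea comes from the patch.
    case class Stats(sizeInBytes: BigInt)

    // Broadcast only when a positive threshold is configured and the build
    // side's estimated size fits under it.
    def shouldBroadcast(buildSide: Stats, autoConvertJoinSize: Long): Boolean =
      autoConvertJoinSize > 0 && buildSide.sizeInBytes <= BigInt(autoConvertJoinSize)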
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index 8258ee5fef0eb..0c8f676e9c5c8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -67,7 +67,7 @@ case class ScriptTransformation(
       }
     }
     readerThread.start()
-    val outputProjection = new Projection(input)
+    val outputProjection = new InterpretedProjection(input, child.output)
     iter
       .map(outputProjection)
       // TODO: Use SerDe
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 057eb60a02612..7582b4743d404 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -251,8 +251,10 @@ private[hive] case class HiveGenericUdtf(
   @transient
   protected lazy val function: GenericUDTF = createFunction()
 
+  @transient
   protected lazy val inputInspectors = children.map(_.dataType).map(toInspector)
 
+  @transient
   protected lazy val outputInspectors = {
     val structInspector = function.initialize(inputInspectors.toArray)
     structInspector.getAllStructFieldRefs.map(_.getFieldObjectInspector)
@@ -278,7 +280,7 @@ private[hive] case class HiveGenericUdtf(
 
   override def eval(input: Row): TraversableOnce[Row] = {
     outputInspectors // Make sure initialized.
-    val inputProjection = new Projection(children)
+    val inputProjection = new InterpretedProjection(children)
 
     val collector = new UDTFCollector
     function.setCollector(collector)
@@ -332,7 +334,7 @@ private[hive] case class HiveUdafFunction(
   override def eval(input: Row): Any = unwrapData(function.evaluate(buffer), returnInspector)
 
   @transient
-  val inputProjection = new Projection(exprs)
+  val inputProjection = new InterpretedProjection(exprs)
 
   def update(input: Row): Unit = {
     val inputs = inputProjection(input).asInstanceOf[Seq[AnyRef]].toArray
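The `Projection` to `InterpretedProjection` renames above are mechanical, but the underlying contract is worth spelling out: a projection is a function from an input row to an output row, built from a sequence of expressions that are each evaluated against the input. A toy reduction of that idea (Catalyst's `InterpretedProjection` takes `Seq[Expression]`; this stand-in takes plain functions):

    // Toy model of an interpreted projection, for illustration only.
    object ToyProjectionDemo {
      type Row  = Seq[Any]
      type Expr = Row => Any

      class ToyProjection(exprs: Seq[Expr]) extends (Row => Row) {
        // Evaluate every expression against the input to build the output row.
        def apply(input: Row): Row = exprs.map(_(input))
      }

      // e.g. project (col1, col0 + 1) from each input row:
      val proj = new ToyProjection(Seq(r => r(1), r => r(0).asInstanceOf[Int] + 1))
    }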
diff --git a/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d b/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d
new file mode 100644
index 0000000000000..00750edc07d64
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d
@@ -0,0 +1 @@
+3
diff --git a/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2 b/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2
new file mode 100644
index 0000000000000..00750edc07d64
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2
@@ -0,0 +1 @@
+3
diff --git a/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73 b/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73
new file mode 100644
index 0000000000000..d00491fd7e5bb
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73
@@ -0,0 +1 @@
+1
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index aadfd2e900151..89cc589fb8001 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -19,6 +19,8 @@ package org.apache.spark.sql.hive.execution
 
 import scala.util.Try
 
+import org.apache.spark.sql.{SchemaRDD, Row}
+import org.apache.spark.sql.hive._
 import org.apache.spark.sql.hive.test.TestHive
 import org.apache.spark.sql.hive.test.TestHive._
 import org.apache.spark.sql.{Row, SchemaRDD}
@@ -30,6 +32,15 @@ case class TestData(a: Int, b: String)
  */
 class HiveQuerySuite extends HiveComparisonTest {
 
+  createQueryTest("single case",
+    """SELECT case when true then 1 else 2 end FROM src LIMIT 1""")
+
+  createQueryTest("double case",
+    """SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else 2 end FROM src LIMIT 1""")
+
+  createQueryTest("case else null",
+    """SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else null end FROM src LIMIT 1""")
+
   createQueryTest("having no references",
     "SELECT key FROM src GROUP BY key HAVING COUNT(*) > 1")
 
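A note on the expected outputs, for readers cross-checking the golden files added above: `single case` takes the first branch (`when true`) and returns 1; in both `double case` and `case else null` the first true predicate is `2 = 2`, so each returns 3 and the `else` branch (2 or null, respectively) is never evaluated. This matches the golden file contents of 1, 3, and 3.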