[SPARK-24117][SQL] Unified the getSizePerRow #21189
```diff
@@ -27,6 +27,7 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions.Attribute
 import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, Statistics}
+import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.EstimationUtils
 import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.{Append, Complete, Update}
 import org.apache.spark.sql.execution.streaming.{MemorySinkBase, Sink}
 import org.apache.spark.sql.sources.v2.{DataSourceOptions, DataSourceV2, StreamWriteSupport}
```
```diff
@@ -182,7 +183,7 @@ class MemoryDataWriter(partition: Int, outputMode: OutputMode)
  * Used to query the data that has been written into a [[MemorySinkV2]].
  */
 case class MemoryPlanV2(sink: MemorySinkV2, override val output: Seq[Attribute]) extends LeafNode {
-  private val sizePerRow = output.map(_.dataType.defaultSize).sum
+  private val sizePerRow = EstimationUtils.getSizePerRow(output)

   override def computeStats(): Statistics = Statistics(sizePerRow * sink.allData.size)
 }
```

Review comment on this hunk: I wouldn't think it's possible.
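For context, here is a minimal sketch of what a per-row size helper along these lines computes. The object name is hypothetical and the body is an assumption inferred from this diff and the review thread below; the real `EstimationUtils.getSizePerRow` lives in Catalyst's statsEstimation package and may also consult column statistics:

```scala
import org.apache.spark.sql.catalyst.expressions.Attribute

// Sketch only: a unified per-row size estimate that, unlike the old
// output.map(_.dataType.defaultSize).sum, also counts a fixed 8-byte
// overhead for the row object itself.
object SizeEstimationSketch {
  def getSizePerRow(attributes: Seq[Attribute]): Long =
    // 8 bytes of per-row object overhead, plus each output column's
    // flat default size (4 for Int, 8 for Long/Double, ...).
    8L + attributes.map(_.dataType.defaultSize.toLong).sum
}
```

With an estimate of this shape, `computeStats()` simply multiplies the per-row size by the number of rows the sink currently holds.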
```diff
@@ -220,11 +220,11 @@ class MemorySinkSuite extends StreamTest with BeforeAndAfter {

     sink.addBatch(0, 1 to 3)
     plan.invalidateStatsCache()
-    assert(plan.stats.sizeInBytes === 12)
+    assert(plan.stats.sizeInBytes === 36)

     sink.addBatch(1, 4 to 6)
     plan.invalidateStatsCache()
-    assert(plan.stats.sizeInBytes === 24)
+    assert(plan.stats.sizeInBytes === 72)
   }

   ignore("stress test") {
```

Review thread on the updated asserts:

- "It shouldn't impact anything, but abstractly it seems strange that this unification would cause the stats to change? What are we doing differently to cause this, and how confident are we this won't happen to production sinks?"
- "It seems we forgot to count the row object overhead (8 bytes) in the memory stream before."
- "SGTM then."
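The new expected values follow directly from the 8-byte overhead mentioned in the thread: each batch holds three `Int` rows, and `IntegerType.defaultSize` is 4 bytes, so the estimate per batch goes from 3 × 4 = 12 to 3 × (8 + 4) = 36 bytes, and to 72 after the second batch. A quick check of that arithmetic:

```scala
val rowsPerBatch = 3
val intDefaultSize = 4     // IntegerType.defaultSize
val rowOverhead = 8        // per-row object overhead cited in the thread

val oldSizePerRow = intDefaultSize               // 4 bytes
val newSizePerRow = rowOverhead + intDefaultSize // 12 bytes

assert(oldSizePerRow * rowsPerBatch == 12)       // old assert, one batch
assert(newSizePerRow * rowsPerBatch == 36)       // new assert, one batch
assert(newSizePerRow * rowsPerBatch * 2 == 72)   // new assert, two batches
```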
cc @juliuszsompolski @cloud-fan