forked from apache/spark
[SPARK-28571][CORE][SHUFFLE] Use the shuffle writer plugin for the SortShuffleWriter

Use the shuffle writer APIs introduced in SPARK-28209 in the sort shuffle writer. Existing unit tests were changed to exercise the plugin code path, using the local-disk implementation to ensure there were no regressions.

Closes apache#25342 from mccheah/shuffle-writer-refactor-sort-shuffle-writer.

Lead-authored-by: mcheah <[email protected]>
Co-authored-by: mccheah <[email protected]>
Signed-off-by: Marcelo Vanzin <[email protected]>
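The SortShuffleWriter diff itself is among the files not reproduced below. As orientation, the rewritten write path has roughly the following shape. This is a hedged sketch against the SPARK-28209 ShuffleExecutorComponents API, with field names (dep, sorter, mapId, mapStatus, blockManager) assumed from the surrounding class; the verbatim commit may differ in details.

  // Illustrative sketch only, not the verbatim commit: SortShuffleWriter asks the
  // plugin for a map output writer instead of writing through the block manager.
  override def write(records: Iterator[Product2[K, V]]): Unit = {
    sorter = new ExternalSorter[K, V, C](
      context, dep.aggregator, Some(dep.partitioner), dep.keyOrdering, dep.serializer)
    sorter.insertAll(records)
    // Obtain a writer for this map task's output from the configured plugin.
    val mapOutputWriter = shuffleExecutorComponents.createMapOutputWriter(
      dep.shuffleId, mapId, dep.partitioner.numPartitions)
    // The sorter streams each partition's pairs through ShufflePartitionPairsWriter
    // (shown below) into the plugin's per-partition output streams.
    sorter.writePartitionedMapOutput(dep.shuffleId, mapId, mapOutputWriter)
    val partitionLengths = mapOutputWriter.commitAllPartitions()
    mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths)
  }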
Showing 6 changed files with 262 additions and 20 deletions.
core/src/main/scala/org/apache/spark/shuffle/ShufflePartitionPairsWriter.scala (126 additions & 0 deletions)
@@ -0,0 +1,126 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.shuffle

import java.io.{Closeable, IOException, OutputStream}

import org.apache.spark.serializer.{SerializationStream, SerializerInstance, SerializerManager}
import org.apache.spark.shuffle.api.ShufflePartitionWriter
import org.apache.spark.storage.BlockId
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.PairsWriter

/**
 * A key-value writer inspired by {@link DiskBlockObjectWriter} that pushes the bytes to an
 * arbitrary partition writer instead of writing to local disk through the block manager.
 */
private[spark] class ShufflePartitionPairsWriter(
    partitionWriter: ShufflePartitionWriter,
    serializerManager: SerializerManager,
    serializerInstance: SerializerInstance,
    blockId: BlockId,
    writeMetrics: ShuffleWriteMetricsReporter)
  extends PairsWriter with Closeable {

  private var isClosed = false
  private var partitionStream: OutputStream = _
  private var wrappedStream: OutputStream = _
  private var objOut: SerializationStream = _
  private var numRecordsWritten = 0
  private var curNumBytesWritten = 0L

  override def write(key: Any, value: Any): Unit = {
    if (isClosed) {
      throw new IOException("Partition pairs writer is already closed.")
    }
    if (objOut == null) {
      open()
    }
    objOut.writeKey(key)
    objOut.writeValue(value)
    recordWritten()
  }

  private def open(): Unit = {
    try {
      partitionStream = partitionWriter.openStream
      wrappedStream = serializerManager.wrapStream(blockId, partitionStream)
      objOut = serializerInstance.serializeStream(wrappedStream)
    } catch {
      case e: Exception =>
        Utils.tryLogNonFatalError {
          close()
        }
        throw e
    }
  }

  override def close(): Unit = {
    if (!isClosed) {
      Utils.tryWithSafeFinally {
        Utils.tryWithSafeFinally {
          objOut = closeIfNonNull(objOut)
          // Setting these to null will prevent the underlying streams from being closed twice
          // just in case any stream's close() implementation is not idempotent.
          wrappedStream = null
          partitionStream = null
        } {
          // Normally closing objOut would close the inner streams as well, but just in case there
          // was an error in initialization etc. we make sure we clean the other streams up too.
          Utils.tryWithSafeFinally {
            wrappedStream = closeIfNonNull(wrappedStream)
            // Same as above - if wrappedStream closes then assume it closes underlying
            // partitionStream and don't close again in the finally
            partitionStream = null
          } {
            partitionStream = closeIfNonNull(partitionStream)
          }
        }
        updateBytesWritten()
      } {
        isClosed = true
      }
    }
  }

  private def closeIfNonNull[T <: Closeable](closeable: T): T = {
    if (closeable != null) {
      closeable.close()
    }
    null.asInstanceOf[T]
  }

  /**
   * Notify the writer that a record worth of bytes has been written with OutputStream#write.
   */
  private def recordWritten(): Unit = {
    numRecordsWritten += 1
    writeMetrics.incRecordsWritten(1)

    // Only refresh the byte count periodically; querying the partition writer for its
    // byte count on every record would be needlessly expensive.
    if (numRecordsWritten % 16384 == 0) {
      updateBytesWritten()
    }
  }

  private def updateBytesWritten(): Unit = {
    val numBytesWritten = partitionWriter.getNumBytesWritten
    val bytesWrittenDiff = numBytesWritten - curNumBytesWritten
    writeMetrics.incBytesWritten(bytesWrittenDiff)
    curNumBytesWritten = numBytesWritten
  }
}
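For orientation, a caller such as ExternalSorter's writePartitionedMapOutput would drive this class roughly as follows, once per reduce partition. This is a hedged usage sketch, not part of the commit: iteratorForPartition is a hypothetical stand-in for the sorter's internal per-partition iterator, and the other variables are assumed to be in scope.

  // Hypothetical usage sketch: one ShufflePartitionPairsWriter per reduce partition.
  val partitionPairsWriter = new ShufflePartitionPairsWriter(
    mapOutputWriter.getPartitionWriter(partitionId),
    serializerManager,
    serializerInstance,
    ShuffleBlockId(shuffleId, mapId, partitionId),
    writeMetrics)
  try {
    // Each write() serializes one key-value pair into the plugin's stream.
    iteratorForPartition(partitionId).foreach { case (key, value) =>
      partitionPairsWriter.write(key, value)
    }
  } finally {
    // close() performs a final updateBytesWritten(), so the metrics include the
    // bytes written since the last 16384-record refresh.
    partitionPairsWriter.close()
  }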
core/src/test/scala/org/apache/spark/shuffle/sort/SortShuffleWriterSuite.scala (108 additions & 0 deletions)
@@ -0,0 +1,108 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.shuffle.sort

import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.Answers.RETURNS_SMART_NULLS
import org.mockito.Mockito._
import org.scalatest.Matchers

import org.apache.spark.{Partitioner, SharedSparkContext, ShuffleDependency, SparkFunSuite}
import org.apache.spark.memory.MemoryTestingUtils
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.shuffle.{BaseShuffleHandle, IndexShuffleBlockResolver}
import org.apache.spark.shuffle.api.ShuffleExecutorComponents
import org.apache.spark.shuffle.sort.io.LocalDiskShuffleExecutorComponents
import org.apache.spark.storage.BlockManager
import org.apache.spark.util.Utils

class SortShuffleWriterSuite extends SparkFunSuite with SharedSparkContext with Matchers {

  @Mock(answer = RETURNS_SMART_NULLS)
  private var blockManager: BlockManager = _

  private val shuffleId = 0
  private val numMaps = 5
  private var shuffleHandle: BaseShuffleHandle[Int, Int, Int] = _
  private val shuffleBlockResolver = new IndexShuffleBlockResolver(conf)
  private val serializer = new JavaSerializer(conf)
  private var shuffleExecutorComponents: ShuffleExecutorComponents = _

  override def beforeEach(): Unit = {
    super.beforeEach()
    MockitoAnnotations.initMocks(this)
    val partitioner = new Partitioner() {
      def numPartitions = numMaps
      def getPartition(key: Any) = Utils.nonNegativeMod(key.hashCode, numPartitions)
    }
    shuffleHandle = {
      val dependency = mock(classOf[ShuffleDependency[Int, Int, Int]])
      when(dependency.partitioner).thenReturn(partitioner)
      when(dependency.serializer).thenReturn(serializer)
      when(dependency.aggregator).thenReturn(None)
      when(dependency.keyOrdering).thenReturn(None)
      new BaseShuffleHandle(shuffleId, numMaps = numMaps, dependency)
    }
    shuffleExecutorComponents = new LocalDiskShuffleExecutorComponents(
      conf, blockManager, shuffleBlockResolver)
  }

  override def afterAll(): Unit = {
    try {
      shuffleBlockResolver.stop()
    } finally {
      super.afterAll()
    }
  }

  test("write empty iterator") {
    val context = MemoryTestingUtils.fakeTaskContext(sc.env)
    val writer = new SortShuffleWriter[Int, Int, Int](
      shuffleBlockResolver,
      shuffleHandle,
      mapId = 1,
      context,
      shuffleExecutorComponents)
    writer.write(Iterator.empty)
    writer.stop(success = true)
    val dataFile = shuffleBlockResolver.getDataFile(shuffleId, 1)
    val writeMetrics = context.taskMetrics().shuffleWriteMetrics
    assert(!dataFile.exists())
    assert(writeMetrics.bytesWritten === 0)
    assert(writeMetrics.recordsWritten === 0)
  }

  test("write with some records") {
    val context = MemoryTestingUtils.fakeTaskContext(sc.env)
    val records = List[(Int, Int)]((1, 2), (2, 3), (4, 4), (6, 5))
    val writer = new SortShuffleWriter[Int, Int, Int](
      shuffleBlockResolver,
      shuffleHandle,
      mapId = 2,
      context,
      shuffleExecutorComponents)
    writer.write(records.toIterator)
    writer.stop(success = true)
    val dataFile = shuffleBlockResolver.getDataFile(shuffleId, 2)
    val writeMetrics = context.taskMetrics().shuffleWriteMetrics
    assert(dataFile.exists())
    assert(dataFile.length() === writeMetrics.bytesWritten)
    assert(records.size === writeMetrics.recordsWritten)
  }
}
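For reference, a suite like this is normally run in isolation through Spark's sbt build; the command below follows the project's usual developer workflow and is illustrative rather than taken from the commit:

build/sbt "core/testOnly *SortShuffleWriterSuite"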