Commit: Generalized PeriodicGraphCheckpointer to PeriodicCheckpointer, with subclasses for RDDs and Graphs.
Showing 6 changed files with 471 additions and 95 deletions.
mllib/src/main/scala/org/apache/spark/mllib/impl/PeriodicCheckpointer.scala (new file, 184 additions, 0 deletions)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.impl

import scala.collection.mutable

import org.apache.hadoop.fs.{FileSystem, Path}

import org.apache.spark.{Logging, SparkContext}
import org.apache.spark.storage.StorageLevel

/**
 * This abstraction helps with persisting and checkpointing RDDs and types derived from RDDs
 * (such as Graphs and DataFrames).  In documentation, we use the phrase "Dataset" to refer to
 * the distributed data type (RDD, Graph, etc.).
 *
 * Specifically, this abstraction automatically handles persisting and (optionally) checkpointing,
 * as well as unpersisting and removing checkpoint files.
 *
 * Users should call update() when a new Dataset has been created,
 * before the Dataset has been materialized.  After updating [[PeriodicCheckpointer]], users are
 * responsible for materializing the Dataset to ensure that persisting and checkpointing actually
 * occur.
 *
 * When update() is called, this does the following:
 *  - Persist new Dataset (if not yet persisted), and put in queue of persisted Datasets.
 *  - Unpersist Datasets from queue until there are at most 3 persisted Datasets.
 *  - If using checkpointing and the checkpoint interval has been reached,
 *    - Checkpoint the new Dataset, and put in a queue of checkpointed Datasets.
 *    - Remove older checkpoints.
 *
 * WARNINGS:
 *  - This class should NOT be copied (since copies may conflict on which Datasets should be
 *    checkpointed).
 *  - This class removes checkpoint files once later Datasets have been checkpointed.
 *    However, references to the older Datasets will still return isCheckpointed = true.
 *
 * Example usage:
 * {{{
 *  val (data1, data2, data3, ...) = ...
 *  val cp = new PeriodicCheckpointer(data1, 2, sc)  // checkpointInterval = 2
 *  data1.count();
 *  // persisted: data1
 *  cp.update(data2)
 *  data2.count();
 *  // persisted: data1, data2
 *  // checkpointed: data2
 *  cp.update(data3)
 *  data3.count();
 *  // persisted: data1, data2, data3
 *  // checkpointed: data2
 *  cp.update(data4)
 *  data4.count();
 *  // persisted: data2, data3, data4
 *  // checkpointed: data4
 *  cp.update(data5)
 *  data5.count();
 *  // persisted: data3, data4, data5
 *  // checkpointed: data4
 * }}}
 *
 * @param currentData  Initial Dataset
 * @param checkpointInterval  Datasets will be checkpointed at this interval
 * @param sc  SparkContext for the Datasets given to this checkpointer
 * @tparam T  Dataset type, such as RDD[Double]
 */
private[mllib] abstract class PeriodicCheckpointer[T](
    var currentData: T,
    val checkpointInterval: Int,
    val sc: SparkContext) extends Logging {

  /** FIFO queue of past checkpointed Datasets */
  private val checkpointQueue = mutable.Queue[T]()

  /** FIFO queue of past persisted Datasets */
  private val persistedQueue = mutable.Queue[T]()

  /** Number of times [[update()]] has been called */
  private var updateCount = 0

  update(currentData)

  /**
   * Update [[currentData]] with a new Dataset.  Handle persistence and checkpointing as needed.
   * Since this handles persistence and checkpointing, this should be called before the Dataset
   * has been materialized.
   *
   * @param newData  New Dataset created from previous Datasets in the lineage.
   */
  def update(newData: T): Unit = {
    persist(newData)
    persistedQueue.enqueue(newData)
    // We try to keep 2 past Datasets persisted, in addition to the new (not yet materialized)
    // one, so persistedQueue holds at most 3.  This supports the semantics of this class:
    // users should call [[update()]] when a new Dataset has been created,
    // before the Dataset has been materialized.
    while (persistedQueue.size > 3) {
      val dataToUnpersist = persistedQueue.dequeue()
      unpersist(dataToUnpersist)
    }
    updateCount += 1

    // Handle checkpointing (after persisting)
    if ((updateCount % checkpointInterval) == 0 && sc.getCheckpointDir.nonEmpty) {
      // Add new checkpoint before removing old checkpoints.
      checkpoint(newData)
      checkpointQueue.enqueue(newData)
      // Remove checkpoints before the latest one.
      var canDelete = true
      while (checkpointQueue.size > 1 && canDelete) {
        // Delete the oldest checkpoint only if the next checkpoint exists.
        if (isCheckpointed(checkpointQueue.get(1).get)) {
          removeCheckpointFile()
        } else {
          canDelete = false
        }
      }
    }

    currentData = newData
  }

  /** Checkpoint the Dataset */
  def checkpoint(data: T): Unit

  /** Return true iff the Dataset is checkpointed */
  def isCheckpointed(data: T): Boolean

  /**
   * Persist the Dataset.
   * Note: This should handle checking the current [[StorageLevel]] of the Dataset.
   */
  def persist(data: T): Unit

  /** Unpersist the Dataset */
  def unpersist(data: T): Unit

  /** Get list of checkpoint files for the given Dataset */
  def getCheckpointFiles(data: T): Iterable[String]

  /**
   * Call this at the end to delete any remaining checkpoint files.
   */
  def deleteAllCheckpoints(): Unit = {
    while (checkpointQueue.nonEmpty) {
      removeCheckpointFile()
    }
  }

  /**
   * Dequeue the oldest checkpointed Dataset, and remove its checkpoint files.
   * This prints a warning but does not fail if the files cannot be removed.
   */
  private def removeCheckpointFile(): Unit = {
    val old = checkpointQueue.dequeue()
    // Since the old checkpoint is not deleted by Spark, we manually delete it.
    val fs = FileSystem.get(sc.hadoopConfiguration)
    getCheckpointFiles(old).foreach { checkpointFile =>
      try {
        fs.delete(new Path(checkpointFile), true)
      } catch {
        case e: Exception =>
          logWarning("PeriodicCheckpointer could not remove old checkpoint file: " +
            checkpointFile)
      }
    }
  }

}
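
The RDD and Graph subclasses named in the commit message are among the other changed files and are not shown on this page. As a rough sketch only, an RDD-backed subclass implementing the five abstract methods above might look like the following; the class name PeriodicRDDCheckpointer and the specific persistence choices are assumptions, not necessarily the commit's code.

package org.apache.spark.mllib.impl

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

// Sketch of a possible RDD subclass (hypothetical; the commit's actual subclass is in another file).
private[mllib] class PeriodicRDDCheckpointer[T](
    currentData: RDD[T],
    checkpointInterval: Int,
    sc: SparkContext)
  extends PeriodicCheckpointer[RDD[T]](currentData, checkpointInterval, sc) {

  // Mark the RDD for checkpointing; the checkpoint file is written on the next materialization.
  override def checkpoint(data: RDD[T]): Unit = data.checkpoint()

  override def isCheckpointed(data: RDD[T]): Boolean = data.isCheckpointed

  // Per the note on persist(): only persist if the caller has not already set a StorageLevel.
  override def persist(data: RDD[T]): Unit = {
    if (data.getStorageLevel == StorageLevel.NONE) {
      data.persist()
    }
  }

  override def unpersist(data: RDD[T]): Unit = data.unpersist(blocking = false)

  // An RDD has at most one checkpoint file; widen the Option to an Iterable.
  override def getCheckpointFiles(data: RDD[T]): Iterable[String] =
    data.getCheckpointFile.map(x => x)
}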
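
For reference, the call pattern from the class documentation (update() right after creating each new Dataset, materialize it, then deleteAllCheckpoints() at the end) could look like this in an iterative driver loop. This uses the hypothetical subclass sketched above; the checkpoint directory, interval, and map step are illustrative assumptions.

object CheckpointingExample {
  import org.apache.spark.SparkContext
  import org.apache.spark.rdd.RDD

  // Hypothetical iterative job (sketch).  A checkpoint directory must be set,
  // or update() manages persistence only and skips checkpointing.
  def run(sc: SparkContext, initialData: RDD[Double], numIterations: Int): RDD[Double] = {
    sc.setCheckpointDir("/tmp/checkpoints")  // illustrative path
    var data = initialData
    // The constructor calls update(initialData), so data is persisted immediately.
    val checkpointer = new PeriodicRDDCheckpointer[Double](data, 3, sc)
    for (_ <- 1 to numIterations) {
      data = data.map(_ * 0.9)   // new Dataset derived from the previous lineage
      checkpointer.update(data)  // persist, and checkpoint every 3rd update
      data.count()               // materialize so persisting/checkpointing take effect
    }
    checkpointer.deleteAllCheckpoints()  // per the class doc: call at the end
    data
  }
}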