SPARK-1171: when executor is removed, we should minus totalCores instead of just freeCores on that executor #63 (Closed)
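The bug in context: removeExecutor subtracted only the executor's remaining free cores from totalCoreCount, so cores still occupied by running tasks were never given back. A toy walk-through with made-up numbers (an illustration, not code from the PR):

    // Hypothetical numbers: an executor with 4 cores and 3 running tasks.
    object CoreAccountingBug extends App {
      var totalCoreCount = 0        // cluster-wide core counter
      var freeCores      = 0        // free cores on the one executor

      totalCoreCount += 4           // executor registers with 4 cores
      freeCores = 4
      freeCores -= 3                // 3 tasks launched, one core each

      totalCoreCount -= freeCores   // buggy removal: subtracts 1, not 4
      println(totalCoreCount)       // prints 3 -- phantom cores remain

      // The fix: subtract the executor's total cores instead,
      // which would leave totalCoreCount at 0.
    }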

WorkerOffer.scala
@@ -21,4 +21,6 @@ package org.apache.spark.scheduler
  * Represents free resources available on an executor.
  */
 private[spark]
-class WorkerOffer(val executorId: String, val host: String, val cores: Int)
+class WorkerOffer(val executorId: String, val host: String, var cores: Int) {
+  @transient val totalcores = cores
+}
Review comments on the added line "@transient val totalcores = cores":

Contributor: Why does this need to be transient? Also, use camelCase for the name (totalCores).

Contributor: Actually, on second thought, can CoarseGrainedSchedulerBackend just store the total cores for each worker in a hash map? I'd prefer that solution, since other classes use WorkerOffer and don't use it to keep track of the total cores on each worker.
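A minimal sketch of that alternative (the object and method names here are illustrative, not from the PR; freeCores and totalCoreCount mirror names already in CoarseGrainedSchedulerBackend):

    import java.util.concurrent.atomic.AtomicInteger
    import scala.collection.mutable.HashMap

    // Illustrative sketch only: track each executor's total cores in a
    // separate map, leaving WorkerOffer itself immutable and untouched.
    object TotalCoresSketch {
      private val freeCores      = new HashMap[String, Int]
      private val totalCores     = new HashMap[String, Int]
      private val totalCoreCount = new AtomicInteger(0)

      def registerExecutor(executorId: String, cores: Int): Unit = {
        freeCores(executorId)  = cores
        totalCores(executorId) = cores
        totalCoreCount.addAndGet(cores)
      }

      def removeExecutor(executorId: String): Unit = {
        // Subtract the executor's *total* cores (the SPARK-1171 fix),
        // not just the cores that happen to be free at removal time.
        totalCores.get(executorId).foreach(n => totalCoreCount.addAndGet(-n))
        freeCores -= executorId
        totalCores -= executorId
      }
    }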

Contributor: +1. I'd also like to see WorkerOffer remain more like an immutable message type, with derived, mutable structures created only locally within the implementations that need them.

Contributor: On that note, it seems to me that WorkerOffer should just be a case class, since all the constructor parameters are public vals anyway.
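Taken together with the previous comment, the suggested shape might look like this sketch (not code from this PR):

    package org.apache.spark.scheduler

    // Sketch: WorkerOffer as an immutable message type. Case-class
    // constructor parameters are public vals by default, so the explicit
    // vals disappear, and equals/hashCode/copy() come for free.
    private[spark] case class WorkerOffer(executorId: String, host: String, cores: Int)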

CoarseGrainedSchedulerBackend.scala
@@ -51,9 +51,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: ActorSystem)
 
   class DriverActor(sparkProperties: Seq[(String, String)]) extends Actor {
     private val executorActor = new HashMap[String, ActorRef]
-    private val executorAddress = new HashMap[String, Address]
-    private val executorHost = new HashMap[String, String]
-    private val freeCores = new HashMap[String, Int]
+    private val workerOffers = new HashMap[String, WorkerOffer]
     private val addressToExecutorId = new HashMap[Address, String]
 
     override def preStart() {
@@ -75,9 +73,8 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: ActorSystem)
         logInfo("Registered executor: " + sender + " with ID " + executorId)
         sender ! RegisteredExecutor(sparkProperties)
         executorActor(executorId) = sender
-        executorHost(executorId) = Utils.parseHostPort(hostPort)._1
-        freeCores(executorId) = cores
-        executorAddress(executorId) = sender.path.address
+        workerOffers += (executorId ->
+          new WorkerOffer(executorId, Utils.parseHostPort(hostPort)._1, cores))
         addressToExecutorId(sender.path.address) = executorId
         totalCoreCount.addAndGet(cores)
         makeOffers()
@@ -87,7 +84,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: ActorSystem)
         scheduler.statusUpdate(taskId, state, data.value)
         if (TaskState.isFinished(state)) {
           if (executorActor.contains(executorId)) {
-            freeCores(executorId) += 1
+            workerOffers(executorId).cores += 1
             makeOffers(executorId)
           } else {
             // Ignoring the update since we don't know about the executor.
@@ -125,20 +122,18 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: ActorSystem)
 
     // Make fake resource offers on all executors
     def makeOffers() {
-      launchTasks(scheduler.resourceOffers(
-        executorHost.toArray.map {case (id, host) => new WorkerOffer(id, host, freeCores(id))}))
+      launchTasks(scheduler.resourceOffers(workerOffers.values.toSeq))
     }
 
     // Make fake resource offers on just one executor
     def makeOffers(executorId: String) {
-      launchTasks(scheduler.resourceOffers(
-        Seq(new WorkerOffer(executorId, executorHost(executorId), freeCores(executorId)))))
+      launchTasks(scheduler.resourceOffers(Seq(workerOffers(executorId))))
     }
 
     // Launch tasks returned by a set of resource offers
     def launchTasks(tasks: Seq[Seq[TaskDescription]]) {
       for (task <- tasks.flatten) {
-        freeCores(task.executorId) -= 1
+        workerOffers(task.executorId).cores -= 1
         executorActor(task.executorId) ! LaunchTask(task)
       }
     }
@@ -147,11 +142,9 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, actorSystem: ActorSystem)
     def removeExecutor(executorId: String, reason: String) {
       if (executorActor.contains(executorId)) {
         logInfo("Executor " + executorId + " disconnected, so removing it")
-        val numCores = freeCores(executorId)
-        addressToExecutorId -= executorAddress(executorId)
+        val numCores = workerOffers(executorId).totalcores
         executorActor -= executorId
-        executorHost -= executorId
-        freeCores -= executorId
+        workerOffers -= executorId
         totalCoreCount.addAndGet(-numCores)
         scheduler.executorLost(executorId, SlaveLost(reason))
       }