
Commit

Revert "SPARK-1099:Spark's local mode should probably respect spark.c…
Browse files Browse the repository at this point in the history
…ores.max by default"

This reverts commit 1678931. Jenkins was not run for this PR.
aarondav committed Mar 20, 2014
1 parent 1678931 commit ffe272d
Showing 3 changed files with 6 additions and 22 deletions.
5 changes: 1 addition & 4 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1262,10 +1262,7 @@ object SparkContext extends Logging {
     master match {
       case "local" =>
         val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
-        // Use user specified in config, up to all available cores
-        val realCores = Runtime.getRuntime.availableProcessors()
-        val toUseCores = math.min(sc.conf.getInt("spark.cores.max", realCores), realCores)
-        val backend = new LocalBackend(scheduler, toUseCores)
+        val backend = new LocalBackend(scheduler, 1)
         scheduler.initialize(backend)
         scheduler
 
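For context, the four deleted lines were the substance of the reverted change: in plain "local" mode they sized the LocalBackend to spark.cores.max, capped at the machine's actual core count. A minimal standalone sketch of that logic, assuming a hypothetical resolveLocalCores helper (the math.min expression comes from the diff; the helper itself is not Spark API):

import org.apache.spark.SparkConf

// Hypothetical helper illustrating the reverted behavior: honor
// spark.cores.max in "local" mode, but never exceed the hardware.
def resolveLocalCores(conf: SparkConf): Int = {
  val realCores = Runtime.getRuntime.availableProcessors()
  // Default to all available cores when spark.cores.max is unset,
  // and clamp any user-supplied value to the physical core count.
  math.min(conf.getInt("spark.cores.max", realCores), realCores)
}

With the revert applied, "local" ignores spark.cores.max and always runs on a single core.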
4 changes: 2 additions & 2 deletions core/src/test/scala/org/apache/spark/FileSuite.scala
@@ -34,7 +34,7 @@ import org.apache.spark.SparkContext._
 class FileSuite extends FunSuite with LocalSparkContext {
 
   test("text files") {
-    sc = new SparkContext("local[1]", "test")
+    sc = new SparkContext("local", "test")
     val tempDir = Files.createTempDir()
     val outputDir = new File(tempDir, "output").getAbsolutePath
     val nums = sc.makeRDD(1 to 4)
@@ -176,7 +176,7 @@ class FileSuite extends FunSuite with LocalSparkContext {

test("write SequenceFile using new Hadoop API") {
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
sc = new SparkContext("local[1]", "test")
sc = new SparkContext("local", "test")
val tempDir = Files.createTempDir()
val outputDir = new File(tempDir, "output").getAbsolutePath
val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * x)))
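Both test reverts follow from the scheduler change above: once the revert lands, plain "local" is pinned to one core again, so these tests no longer need "local[1]" to force serial execution. A quick illustrative sketch (not from the diff):

import org.apache.spark.SparkContext

// After this revert, both master strings produce a single-core LocalBackend:
val sc = new SparkContext("local", "test")    // fixed at 1 core again
// new SparkContext("local[1]", "test")       // explicitly 1 core; same scheduling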
19 changes: 3 additions & 16 deletions core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
@@ -27,10 +27,10 @@ import org.apache.spark.scheduler.local.LocalBackend
 class SparkContextSchedulerCreationSuite
   extends FunSuite with PrivateMethodTester with LocalSparkContext with Logging {
 
-  def createTaskScheduler(master: String, conf: SparkConf = new SparkConf()): TaskSchedulerImpl = {
+  def createTaskScheduler(master: String): TaskSchedulerImpl = {
     // Create local SparkContext to setup a SparkEnv. We don't actually want to start() the
     // real schedulers, so we don't want to create a full SparkContext with the desired scheduler.
-    sc = new SparkContext("local", "test", conf)
+    sc = new SparkContext("local", "test")
     val createTaskSchedulerMethod = PrivateMethod[TaskScheduler]('createTaskScheduler)
     val sched = SparkContext invokePrivate createTaskSchedulerMethod(sc, master)
     sched.asInstanceOf[TaskSchedulerImpl]
@@ -44,26 +44,13 @@ class SparkContextSchedulerCreationSuite
   }
 
   test("local") {
-    var conf = new SparkConf()
-    conf.set("spark.cores.max", "1")
-    val sched = createTaskScheduler("local", conf)
+    val sched = createTaskScheduler("local")
     sched.backend match {
       case s: LocalBackend => assert(s.totalCores === 1)
       case _ => fail()
     }
   }
 
-  test("local-cores-exceed") {
-    val cores = Runtime.getRuntime.availableProcessors() + 1
-    var conf = new SparkConf()
-    conf.set("spark.cores.max", cores.toString)
-    val sched = createTaskScheduler("local", conf)
-    sched.backend match {
-      case s: LocalBackend => assert(s.totalCores === Runtime.getRuntime.availableProcessors())
-      case _ => fail()
-    }
-  }
-
   test("local-n") {
     val sched = createTaskScheduler("local[5]")
     assert(sched.maxTaskFailures === 1)
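The suite reaches the private SparkContext.createTaskScheduler method through ScalaTest's PrivateMethodTester, a pattern both versions of the helper rely on. A self-contained sketch of that pattern, using a hypothetical Greeter class purely for illustration:

import org.scalatest.{FunSuite, PrivateMethodTester}

class Greeter {
  // Private method we want to exercise from a test.
  private def greet(name: String): String = s"hello, $name"
}

class GreeterSuite extends FunSuite with PrivateMethodTester {
  test("invoke a private method reflectively") {
    // Bind the method's name and result type, then invoke it on an instance.
    val greet = PrivateMethod[String]('greet)
    assert(new Greeter invokePrivate greet("spark") === "hello, spark")
  }
}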
