diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 74544ce33021e..40f337bcc4429 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -413,11 +413,11 @@ object ApplicationMaster extends Logging {
 
   private var master: ApplicationMaster = _
 
-  def main(argStrings: Array[String]) = {
+  def main(args: Array[String]) = {
     SignalLogger.register(log)
-    val args = new ApplicationMasterArguments(argStrings)
+    val amArgs = new ApplicationMasterArguments(args)
     SparkHadoopUtil.get.runAsSparkUser { () =>
-      master = new ApplicationMaster(args, new YarnRMClientImpl(args))
+      master = new ApplicationMaster(amArgs, new YarnRMClientImpl(amArgs))
       master.run()
     }
   }
@@ -427,3 +427,15 @@ object ApplicationMaster extends Logging {
   }
 
 }
+
+/**
+ * This object does not provide any special functionality. It exists so that it's easy to tell
+ * apart the client-mode AM from the cluster-mode AM when using tools such as ps or jps.
+ */
+object ExecutorLauncher {
+
+  def main(args: Array[String]) = {
+    ApplicationMaster.main(args)
+  }
+
+}
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index 9e6507cc7187a..8a789f3eaac67 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
@@ -312,6 +312,8 @@ trait ClientBase extends Logging {
     val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
     amContainer.setLocalResources(localResources)
 
+    val isLaunchingDriver = args.userClass != null
+
     // In cluster mode, if the deprecated SPARK_JAVA_OPTS is set, we need to propagate it to
     // executors. But we can't just set spark.executor.extraJavaOptions, because the driver's
     // SparkContext will not let that set spark* system properties, which is expected behavior for
@@ -320,7 +322,7 @@ trait ClientBase extends Logging {
     // Note that to warn the user about the deprecation in cluster mode, some code from
     // SparkConf#validateSettings() is duplicated here (to avoid triggering the condition
     // described above).
-    if (args.userClass != null) {
+    if (isLaunchingDriver) {
       sys.env.get("SPARK_JAVA_OPTS").foreach { value =>
         val warning =
           s"""
@@ -380,7 +382,7 @@ trait ClientBase extends Logging {
       javaOpts += YarnSparkHadoopUtil.escapeForShell(s"-D$k=$v")
     }
 
-    if (args.userClass != null) {
+    if (isLaunchingDriver) {
       sparkConf.getOption("spark.driver.extraJavaOptions")
         .orElse(sys.env.get("SPARK_JAVA_OPTS"))
         .foreach(opts => javaOpts += opts)
@@ -394,6 +396,12 @@ trait ClientBase extends Logging {
     } else {
       Nil
     }
-    val amArgs = Seq(classOf[ApplicationMaster].getName()) ++ userClass ++
+    val amClass =
+      if (isLaunchingDriver) {
+        classOf[ApplicationMaster].getName()
+      } else {
+        classOf[ApplicationMaster].getName().replace("ApplicationMaster", "ExecutorLauncher")
+      }
+    val amArgs = Seq(amClass) ++ userClass ++
       (if (args.userJar != null) Seq("--jar", args.userJar) else Nil) ++