Merge branch 'master' into SPARK-31670
AngersZhuuuu authored Jun 4, 2020
2 parents 27c495b + 4bbe3c2 commit 4c0b04c
Showing 655 changed files with 17,571 additions and 8,528 deletions.
2 changes: 1 addition & 1 deletion R/pkg/DESCRIPTION
@@ -23,7 +23,7 @@ Suggests:
testthat,
e1071,
survival,
arrow
arrow (>= 0.15.1)
Collate:
'schema.R'
'generics.R'
3 changes: 2 additions & 1 deletion appveyor.yml
@@ -48,7 +48,8 @@ install:
build_script:
# '-Djna.nosys=true' is required to avoid kernel32.dll load failure.
# See SPARK-28759.
- cmd: mvn -DskipTests -Psparkr -Phive -Djna.nosys=true package
# Ideally we should check the tests related to Hive in SparkR as well (SPARK-31745).
- cmd: mvn -DskipTests -Psparkr -Djna.nosys=true package

environment:
NOT_CRAN: true
30 changes: 29 additions & 1 deletion bin/docker-image-tool.sh
@@ -19,6 +19,8 @@
# This script builds and pushes docker images when run from a release of Spark
# with Kubernetes support.

set -x

function error {
echo "$@" 1>&2
exit 1
@@ -172,13 +174,19 @@ function build {
local BASEDOCKERFILE=${BASEDOCKERFILE:-"kubernetes/dockerfiles/spark/Dockerfile"}
local PYDOCKERFILE=${PYDOCKERFILE:-false}
local RDOCKERFILE=${RDOCKERFILE:-false}
local ARCHS=${ARCHS:-"--platform linux/amd64,linux/arm64"}

(cd $(img_ctx_dir base) && docker build $NOCACHEARG "${BUILD_ARGS[@]}" \
-t $(image_ref spark) \
-f "$BASEDOCKERFILE" .)
if [ $? -ne 0 ]; then
error "Failed to build Spark JVM Docker image, please refer to Docker build output for details."
fi
if [ "${CROSS_BUILD}" != "false" ]; then
(cd $(img_ctx_dir base) && docker buildx build $ARCHS $NOCACHEARG "${BUILD_ARGS[@]}" \
-t $(image_ref spark) \
-f "$BASEDOCKERFILE" .)
fi

if [ "${PYDOCKERFILE}" != "false" ]; then
(cd $(img_ctx_dir pyspark) && docker build $NOCACHEARG "${BINDING_BUILD_ARGS[@]}" \
@@ -187,6 +195,11 @@ function build {
if [ $? -ne 0 ]; then
error "Failed to build PySpark Docker image, please refer to Docker build output for details."
fi
if [ "${CROSS_BUILD}" != "false" ]; then
(cd $(img_ctx_dir pyspark) && docker buildx build $ARCHS $NOCACHEARG "${BINDING_BUILD_ARGS[@]}" \
-t $(image_ref spark-py) \
-f "$PYDOCKERFILE" .)
fi
fi

if [ "${RDOCKERFILE}" != "false" ]; then
@@ -196,6 +209,11 @@ function build {
if [ $? -ne 0 ]; then
error "Failed to build SparkR Docker image, please refer to Docker build output for details."
fi
if [ "${CROSS_BUILD}" != "false" ]; then
(cd $(img_ctx_dir sparkr) && docker buildx build $ARCHS $NOCACHEARG "${BINDING_BUILD_ARGS[@]}" \
-t $(image_ref spark-r) \
-f "$RDOCKERFILE" .)
fi
fi
}

@@ -227,6 +245,8 @@ Options:
-n Build docker image with --no-cache
-u uid UID to use in the USER directive to set the user the main Spark process runs as inside the
resulting container
-X Use docker buildx to cross build. Automatically pushes.
See https://docs.docker.com/buildx/working-with-buildx/ for steps to setup buildx.
-b arg Build arg to build or push the image. For multiple build args, this option needs to
be used separately for each build arg.
@@ -252,6 +272,12 @@ Examples:
- Build and push JDK11-based image with tag "v3.0.0" to docker.io/myrepo
$0 -r docker.io/myrepo -t v3.0.0 -b java_image_tag=11-jre-slim build
$0 -r docker.io/myrepo -t v3.0.0 push
- Build and push JDK11-based image for multiple archs to docker.io/myrepo
$0 -r docker.io/myrepo -t v3.0.0 -X -b java_image_tag=11-jre-slim build
# Note: buildx, which does cross building, needs to do the push during build
# So there is no separate push step with -X
EOF
}

@@ -268,7 +294,8 @@ RDOCKERFILE=
NOCACHEARG=
BUILD_PARAMS=
SPARK_UID=
while getopts f:p:R:mr:t:nb:u: option
CROSS_BUILD="false"
while getopts f:p:R:mr:t:Xnb:u: option
do
case "${option}"
in
@@ -279,6 +306,7 @@ do
t) TAG=${OPTARG};;
n) NOCACHEARG="--no-cache";;
b) BUILD_PARAMS=${BUILD_PARAMS}" --build-arg "${OPTARG};;
X) CROSS_BUILD=1;;
m)
if ! which minikube 1>/dev/null; then
error "Cannot find minikube."
@@ -229,8 +229,6 @@ public class ShuffleMetrics implements MetricSet {
private final Meter blockTransferRateBytes = new Meter();
// Number of active connections to the shuffle service
private Counter activeConnections = new Counter();
// Number of registered connections to the shuffle service
private Counter registeredConnections = new Counter();
// Number of exceptions caught in connections to the shuffle service
private Counter caughtExceptions = new Counter();

@@ -242,7 +240,6 @@ public ShuffleMetrics() {
allMetrics.put("registeredExecutorsSize",
(Gauge<Integer>) () -> blockManager.getRegisteredExecutorsSize());
allMetrics.put("numActiveConnections", activeConnections);
allMetrics.put("numRegisteredConnections", registeredConnections);
allMetrics.put("numCaughtExceptions", caughtExceptions);
}

@@ -198,6 +198,7 @@ protected void serviceInit(Configuration conf) throws Exception {
// register metrics on the block handler into the Node Manager's metrics system.
blockHandler.getAllMetrics().getMetrics().put("numRegisteredConnections",
shuffleServer.getRegisteredConnections());
blockHandler.getAllMetrics().getMetrics().putAll(shuffleServer.getAllMetrics().getMetrics());
YarnShuffleServiceMetrics serviceMetrics =
new YarnShuffleServiceMetrics(blockHandler.getAllMetrics());

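The hunks above drop the duplicate numRegisteredConnections counter from ShuffleMetrics and, in serviceInit, merge all of the shuffle server's own metrics into the block handler's metric map before it is registered with the NodeManager's metrics system. Below is a minimal Scala sketch of that merge pattern with Dropwizard MetricSets; the class and metric names are illustrative stand-ins, not Spark's actual types.

import java.util.{HashMap => JHashMap, Map => JMap}

import com.codahale.metrics.{Counter, Metric, MetricSet}

// Illustrative stand-in for a component that exposes a mutable map of metrics.
class SimpleMetricSet(names: String*) extends MetricSet {
  private val all = new JHashMap[String, Metric]()
  names.foreach(n => all.put(n, new Counter))
  override def getMetrics: JMap[String, Metric] = all
}

object MetricsMergeSketch {
  def main(args: Array[String]): Unit = {
    val handlerMetrics = new SimpleMetricSet("numActiveConnections", "numCaughtExceptions")
    val serverMetrics  = new SimpleMetricSet("numRegisteredConnections")
    // Same pattern as serviceInit above: copy the server's metrics into the
    // handler's map so a single, combined MetricSet is registered downstream.
    handlerMetrics.getMetrics.putAll(serverMetrics.getMetrics)
    println(handlerMetrics.getMetrics.keySet()) // now includes numRegisteredConnections
  }
}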
@@ -15,17 +15,13 @@
* limitations under the License.
*/

-package org.apache.spark.sql.execution
-
-/**
- * Physical execution operators for join operations.
- */
-package object joins {
-
-  sealed abstract class BuildSide
-
-  case object BuildRight extends BuildSide
-
-  case object BuildLeft extends BuildSide
-
-}
+package org.apache.spark.tags;
+
+import java.lang.annotation.*;
+
+import org.scalatest.TagAnnotation;
+
+@TagAnnotation
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface ChromeUITest { }
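ChromeUITest above is a ScalaTest tag annotation: because it carries @TagAnnotation and runtime retention, placing it on a test class tags every test in that class, so Chrome-based UI suites can be included or excluded as a group. A minimal sketch of how a suite might use it; the suite name, test body, and the exact FunSuite import (which depends on the ScalaTest version in use) are illustrative assumptions.

import org.apache.spark.tags.ChromeUITest
import org.scalatest.funsuite.AnyFunSuite

// Hypothetical suite: the class-level annotation tags every test in it, so a
// runner can exclude the whole group, e.g. via ScalaTest's -l (exclude tags)
// option or a build-level exclude-tags setting.
@ChromeUITest
class ExampleChromeUISuite extends AnyFunSuite {
  test("web UI renders in headless Chrome") {
    // Selenium/WebDriver interaction with a Chrome instance would go here.
    assert(true)
  }
}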
1 change: 1 addition & 0 deletions conf/spark-env.sh.template
@@ -62,6 +62,7 @@
# Generic options for the daemons used in the standalone deploy mode
# - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf)
# - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs)
# - SPARK_LOG_MAX_FILES Max number of log files Spark daemons can rotate to. (Default: 5)
# - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp)
# - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER)
# - SPARK_NICENESS The scheduling priority for daemons. (Default: 0)
@@ -162,6 +162,11 @@ public void onSpeculativeTaskSubmitted(SparkListenerSpeculativeTaskSubmitted spe
onEvent(speculativeTask);
}

@Override
public void onResourceProfileAdded(SparkListenerResourceProfileAdded event) {
onEvent(event);
}

@Override
public void onOtherEvent(SparkListenerEvent event) {
onEvent(event);
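SparkFirehoseListener funnels every SparkListener callback into a single onEvent method, and the hunk above forwards the new onResourceProfileAdded callback the same way. A minimal Scala sketch of a subclass that would now also observe those events; the class name and counting logic are illustrative.

import org.apache.spark.SparkFirehoseListener
import org.apache.spark.scheduler.{SparkListenerEvent, SparkListenerResourceProfileAdded}

// A subclass only overrides onEvent; after the change above, resource-profile
// additions are delivered through this hook as well.
class EventCountingListener extends SparkFirehoseListener {
  @volatile private var resourceProfileEvents = 0L

  override def onEvent(event: SparkListenerEvent): Unit = event match {
    case _: SparkListenerResourceProfileAdded => resourceProfileEvents += 1
    case _ => // other events are ignored in this sketch
  }

  def resourceProfilesSeen: Long = resourceProfileEvents
}

Such a listener would typically be registered through spark.extraListeners or SparkContext.addSparkListener.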