diff --git a/R/pkg/tests/fulltests/test_context.R b/R/pkg/tests/fulltests/test_context.R
index 288a2714a554e..0e5af3c29f1a0 100644
--- a/R/pkg/tests/fulltests/test_context.R
+++ b/R/pkg/tests/fulltests/test_context.R
@@ -93,6 +93,7 @@ test_that("rdd GC across sparkR.stop", {
   countRDD(rdd3)
   countRDD(rdd4)
   sparkR.session.stop()
+  expect_true(TRUE)
 })
 
 test_that("job group functions can be called", {
@@ -105,6 +106,7 @@ test_that("job group functions can be called", {
   suppressWarnings(cancelJobGroup(sc, "groupId"))
   suppressWarnings(clearJobGroup(sc))
   sparkR.session.stop()
+  expect_true(TRUE)
 })
 
 test_that("job description and local properties can be set and got", {
@@ -143,6 +145,7 @@ test_that("utility function can be called", {
   sparkR.sparkContext(master = sparkRTestMaster)
   setLogLevel("ERROR")
   sparkR.session.stop()
+  expect_true(TRUE)
 })
 
 test_that("getClientModeSparkSubmitOpts() returns spark-submit args from whitelist", {
@@ -246,4 +249,5 @@ test_that("SPARK-25234: parallelize should not have integer overflow", {
   # 47000 * 47000 exceeds integer range
   parallelize(sc, 1:47000, 47000)
   sparkR.session.stop()
+  expect_true(TRUE)
 })
diff --git a/R/pkg/tests/fulltests/test_includePackage.R b/R/pkg/tests/fulltests/test_includePackage.R
index f4ea0d1b5cb27..916361ff4c797 100644
--- a/R/pkg/tests/fulltests/test_includePackage.R
+++ b/R/pkg/tests/fulltests/test_includePackage.R
@@ -39,6 +39,7 @@ test_that("include inside function", {
     data <- lapplyPartition(rdd, generateData)
     actual <- collectRDD(data)
   }
+  expect_true(TRUE)
 })
 
 test_that("use include package", {
@@ -55,6 +56,7 @@ test_that("use include package", {
     data <- lapplyPartition(rdd, generateData)
     actual <- collectRDD(data)
   }
+  expect_true(TRUE)
 })
 
 sparkR.session.stop()
diff --git a/R/pkg/tests/fulltests/test_sparkSQL.R b/R/pkg/tests/fulltests/test_sparkSQL.R
index cd262a494f166..38fb258e05140 100644
--- a/R/pkg/tests/fulltests/test_sparkSQL.R
+++ b/R/pkg/tests/fulltests/test_sparkSQL.R
@@ -1409,6 +1409,7 @@ test_that("column operators", {
   c5 <- c2 ^ c3 ^ c4
   c6 <- c2 %<=>% c3
   c7 <- !c6
+  expect_true(TRUE)
 })
 
 test_that("column functions", {
diff --git a/R/pkg/tests/fulltests/test_textFile.R b/R/pkg/tests/fulltests/test_textFile.R
index be2d2711ff88e..046018c7c2a2d 100644
--- a/R/pkg/tests/fulltests/test_textFile.R
+++ b/R/pkg/tests/fulltests/test_textFile.R
@@ -75,6 +75,7 @@ test_that("several transformations on RDD created by textFile()", {
   collectRDD(rdd)
 
   unlink(fileName)
+  expect_true(TRUE)
 })
 
 test_that("textFile() followed by a saveAsTextFile() returns the same content", {
diff --git a/R/pkg/tests/run-all.R b/R/pkg/tests/run-all.R
index 1e96418558883..bf02ecdad66ff 100644
--- a/R/pkg/tests/run-all.R
+++ b/R/pkg/tests/run-all.R
@@ -20,7 +20,6 @@ library(SparkR)
 
 # SPARK-25572
 if (identical(Sys.getenv("NOT_CRAN"), "true")) {
-
   # Turn all warnings into errors
   options("warn" = 2)
 
@@ -60,11 +59,23 @@ if (identical(Sys.getenv("NOT_CRAN"), "true")) {
 if (identical(Sys.getenv("NOT_CRAN"), "true")) {
   # set random seed for predictable results. mostly for base's sample() in tree and classification
   set.seed(42)
-  # for testthat 1.0.2 later, change reporter from "summary" to default_reporter()
-  testthat:::run_tests("SparkR",
-                       file.path(sparkRDir, "pkg", "tests", "fulltests"),
-                       NULL,
-                       "summary")
+
+  # TODO (SPARK-30663) To be removed once testthat 1.x is removed from all builds
+  if (grepl("^1\\..*", packageVersion("testthat"))) {
+    # testthat 1.x
+    test_runner <- testthat:::run_tests
+    reporter <- "summary"
+
+  } else {
+    # testthat >= 2.0.0
+    test_runner <- testthat:::test_package_dir
+    reporter <- testthat::default_reporter()
+  }
+
+  test_runner("SparkR",
+              file.path(sparkRDir, "pkg", "tests", "fulltests"),
+              NULL,
+              reporter)
 }
 
 SparkR:::uninstallDownloadedSpark()
diff --git a/appveyor.yml b/appveyor.yml
index 7fb22f1b0f4b4..326845d8960d5 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -42,10 +42,9 @@ install:
   # Install maven and dependencies
   - ps: .\dev\appveyor-install-dependencies.ps1
   # Required package for R unit tests
-  - cmd: R -e "install.packages(c('knitr', 'rmarkdown', 'devtools', 'e1071', 'survival'), repos='https://cloud.r-project.org/')"
-  # Here, we use the fixed version of testthat. For more details, please see SPARK-22817.
-  - cmd: R -e "devtools::install_version('testthat', version = '1.0.2', repos='https://cloud.r-project.org/')"
-  - cmd: R -e "packageVersion('knitr'); packageVersion('rmarkdown'); packageVersion('testthat'); packageVersion('e1071'); packageVersion('survival')"
+  - cmd: R -e "install.packages(c('knitr', 'rmarkdown', 'e1071', 'survival', 'arrow'), repos='https://cloud.r-project.org/')"
+  - cmd: R -e "install.packages(c('crayon', 'praise', 'R6', 'testthat'), repos='https://cloud.r-project.org/')"
+  - cmd: R -e "packageVersion('knitr'); packageVersion('rmarkdown'); packageVersion('testthat'); packageVersion('e1071'); packageVersion('survival'); packageVersion('arrow')"
 
 build_script:
   - cmd: mvn -DskipTests -Psparkr -Phive package
diff --git a/docs/README.md b/docs/README.md
index 41b1a2df2490f..7f7666b028b7b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -22,9 +22,8 @@ $ sudo gem install jekyll jekyll-redirect-from pygments.rb
 $ sudo pip install Pygments
 # Following is needed only for generating API docs
 $ sudo pip install sphinx pypandoc mkdocs
-$ sudo Rscript -e 'install.packages(c("knitr", "devtools", "rmarkdown"), repos="https://cloud.r-project.org/")'
+$ sudo Rscript -e 'install.packages(c("knitr", "devtools", "testthat", "rmarkdown"), repos="https://cloud.r-project.org/")'
 $ sudo Rscript -e 'devtools::install_version("roxygen2", version = "5.0.1", repos="https://cloud.r-project.org/")'
-$ sudo Rscript -e 'devtools::install_version("testthat", version = "1.0.2", repos="https://cloud.r-project.org/")'
 ```
 
 Note: If you are on a system with both Ruby 1.9 and Ruby 2.0 you may need to replace gem with gem2.0.
diff --git a/docs/building-spark.md b/docs/building-spark.md
index 810c944333ee9..43a6ebf6f585f 100644
--- a/docs/building-spark.md
+++ b/docs/building-spark.md
@@ -58,7 +58,7 @@ This will build Spark distribution along with Python pip and R packages. For mor
 You can specify the exact version of Hadoop to compile against through the `hadoop.version` property.
 If unset, Spark will build against Hadoop 2.6.X by default.
 
-You can enable the `yarn` profile and optionally set the `yarn.version` property if it is different
+You can enable the `yarn` profile and optionally set the `yarn.version` property if it is different
 from `hadoop.version`.
 
 Examples:
@@ -236,8 +236,7 @@ The run-tests script also can be limited to a specific Python version or a speci
 To run the SparkR tests you will need to install the [knitr](https://cran.r-project.org/package=knitr), [rmarkdown](https://cran.r-project.org/package=rmarkdown), [testthat](https://cran.r-project.org/package=testthat), [e1071](https://cran.r-project.org/package=e1071) and [survival](https://cran.r-project.org/package=survival) packages first:
 
-    Rscript -e "install.packages(c('knitr', 'rmarkdown', 'devtools', 'e1071', 'survival'), repos='https://cloud.r-project.org/')"
-    Rscript -e "devtools::install_version('testthat', version = '1.0.2', repos='https://cloud.r-project.org/')"
+    Rscript -e "install.packages(c('knitr', 'rmarkdown', 'devtools', 'testthat', 'e1071', 'survival'), repos='https://cloud.r-project.org/')"
 
 You can run just the SparkR tests using the command:
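
A note on the recurring `expect_true(TRUE)` additions in the test files above: testthat 2.x treats a `test_that()` block that contains no expectations as an empty test and reports it as skipped, so side-effect-only tests need a placeholder expectation to keep the suite clean. A minimal sketch of the pattern (the test body here is hypothetical, not taken from the patch):

    library(testthat)

    test_that("side-effect only test", {
      # Exercises code for errors, but makes no assertion of its own;
      # without the placeholder below, testthat 2.x reports this block
      # as an empty (skipped) test.
      invisible(sum(1:10))
      expect_true(TRUE)  # placeholder expectation keeps the block non-empty
    })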
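
On the `run-all.R` dispatch: it keys off the installed testthat version, and a quick way to see which branch a given environment would take is the same test reorganized as a standalone check (the `message()` calls are illustrative, not part of the patch):

    # Same version test as run-all.R: TRUE selects the legacy testthat 1.x path.
    # packageVersion() returns a package_version object; grepl() coerces it to
    # a character string such as "2.3.2" before matching.
    legacy <- grepl("^1\\..*", packageVersion("testthat"))
    if (legacy) {
      message("testthat 1.x path: testthat:::run_tests with the summary reporter")
    } else {
      message("testthat >= 2.0.0 path: testthat:::test_package_dir with default_reporter()")
    }

An equivalent, arguably clearer comparison would be `packageVersion("testthat") < "2.0.0"`, since `package_version` objects compare numerically, but the patch keeps the string match.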