From d9aa6545925ad611ac9e5705790a64b7a64d37d0 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Mon, 26 Apr 2021 20:24:09 +0200 Subject: [PATCH 01/15] Reduce code duplication for BLASBenchmark Factor how the various implementations are run. --- .../spark/ml/linalg/BLASBenchmark.scala | 540 +++++------------- 1 file changed, 130 insertions(+), 410 deletions(-) diff --git a/mllib-local/src/test/scala/org/apache/spark/ml/linalg/BLASBenchmark.scala b/mllib-local/src/test/scala/org/apache/spark/ml/linalg/BLASBenchmark.scala index 144f59ac172fe..f92107f735fb8 100644 --- a/mllib-local/src/test/scala/org/apache/spark/ml/linalg/BLASBenchmark.scala +++ b/mllib-local/src/test/scala/org/apache/spark/ml/linalg/BLASBenchmark.scala @@ -17,6 +17,7 @@ package org.apache.spark.ml.linalg +import dev.ludovic.netlib.{BLAS => NetlibBLAS} import dev.ludovic.netlib.blas.NetlibF2jBLAS import scala.concurrent.duration._ @@ -49,259 +50,132 @@ object BLASBenchmark extends BenchmarkBase { println("nativeBLAS = " + nativeBLAS.getClass.getName) // scalastyle:on println - runBenchmark("daxpy") { - val n = 1e8.toInt - val alpha = rnd.nextDouble - val x = Array.fill(n) { rnd.nextDouble } - val y = Array.fill(n) { rnd.nextDouble } - - val benchmark = new Benchmark("daxpy", n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.daxpy(n, alpha, x, 1, y.clone, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.daxpy(n, alpha, x, 1, y.clone, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.daxpy(n, alpha, x, 1, y.clone, 1) - } - } - - benchmark.run() - } - - runBenchmark("saxpy") { - val n = 1e8.toInt - val alpha = rnd.nextFloat - val x = Array.fill(n) { rnd.nextFloat } - val y = Array.fill(n) { rnd.nextFloat } - - val benchmark = new Benchmark("saxpy", n, iters, + def runBLASBenchmark(name: String, n: Int)(bench: NetlibBLAS => Unit) { + val benchmark = new Benchmark(name, n, iters, warmupTime = 30.seconds, minTime = 30.seconds, output = output) benchmark.addCase("f2j") { _ => - f2jBLAS.saxpy(n, alpha, x, 1, y.clone, 1) + bench(f2jBLAS) } benchmark.addCase("java") { _ => - javaBLAS.saxpy(n, alpha, x, 1, y.clone, 1) + bench(javaBLAS) } if (nativeBLAS != javaBLAS) { benchmark.addCase("native") { _ => - nativeBLAS.saxpy(n, alpha, x, 1, y.clone, 1) + bench(nativeBLAS) } } benchmark.run() } - runBenchmark("ddot") { + runBenchmark("daxpy") { val n = 1e8.toInt + val alpha = rnd.nextDouble val x = Array.fill(n) { rnd.nextDouble } val y = Array.fill(n) { rnd.nextDouble } - val benchmark = new Benchmark("ddot", n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.ddot(n, x, 1, y, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.ddot(n, x, 1, y, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.ddot(n, x, 1, y, 1) - } + runBLASBenchmark("daxpy", n) { impl => + impl.daxpy(n, alpha, x, 1, y.clone, 1) } - - benchmark.run() } - runBenchmark("sdot") { + runBenchmark("saxpy") { val n = 1e8.toInt + val alpha = rnd.nextFloat val x = Array.fill(n) { rnd.nextFloat } val y = Array.fill(n) { rnd.nextFloat } - val benchmark = new Benchmark("sdot", n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sdot(n, x, 1, y, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.sdot(n, x, 1, y, 1) - } - - if (nativeBLAS != javaBLAS) { - 
benchmark.addCase("native") { _ => - nativeBLAS.sdot(n, x, 1, y, 1) - } + runBLASBenchmark("saxpy", n) { impl => + impl.saxpy(n, alpha, x, 1, y.clone, 1) } - - benchmark.run() } - runBenchmark("dscal") { + runBenchmark("dcopy") { val n = 1e8.toInt - val alpha = rnd.nextDouble val x = Array.fill(n) { rnd.nextDouble } + val y = Array.fill(n) { 0.0 } - val benchmark = new Benchmark("dscal", n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dscal(n, alpha, x.clone, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.dscal(n, alpha, x.clone, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dscal(n, alpha, x.clone, 1) - } + runBLASBenchmark("dcopy", n) { impl => + impl.dcopy(n, x, 1, y.clone, 1) } - - benchmark.run() } - runBenchmark("sscal") { + runBenchmark("scopy") { val n = 1e8.toInt - val alpha = rnd.nextFloat val x = Array.fill(n) { rnd.nextFloat } + val y = Array.fill(n) { 0.0f } - val benchmark = new Benchmark("sscal", n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sscal(n, alpha, x.clone, 1) + runBLASBenchmark("scopy", n) { impl => + impl.scopy(n, x, 1, y.clone, 1) } - - benchmark.addCase("java") { _ => - javaBLAS.sscal(n, alpha, x.clone, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sscal(n, alpha, x.clone, 1) - } - } - - benchmark.run() } - runBenchmark("dspmv[U]") { - val n = 1e3.toInt - val alpha = rnd.nextDouble - val a = Array.fill(n * (n + 1) / 2) { rnd.nextDouble } + runBenchmark("ddot") { + val n = 1e8.toInt val x = Array.fill(n) { rnd.nextDouble } - val beta = rnd.nextDouble val y = Array.fill(n) { rnd.nextDouble } - val benchmark = new Benchmark("dspmv[U]", n * (n + 1) / 2, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dspmv("U", n, alpha, a, x, 1, beta, y.clone, 1) + runBLASBenchmark("ddot", n) { impl => + impl.ddot(n, x, 1, y, 1) } + } - benchmark.addCase("java") { _ => - javaBLAS.dspmv("U", n, alpha, a, x, 1, beta, y.clone, 1) - } + runBenchmark("sdot") { + val n = 1e8.toInt + val x = Array.fill(n) { rnd.nextFloat } + val y = Array.fill(n) { rnd.nextFloat } - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dspmv("U", n, alpha, a, x, 1, beta, y.clone, 1) - } + runBLASBenchmark("sdot", n) { impl => + impl.sdot(n, x, 1, y, 1) } - - benchmark.run() } - runBenchmark("dspr[U]") { - val n = 1e3.toInt - val alpha = rnd.nextDouble + runBenchmark("dnrm2") { + val n = 1e8.toInt val x = Array.fill(n) { rnd.nextDouble } - val a = Array.fill(n * (n + 1) / 2) { rnd.nextDouble } - val benchmark = new Benchmark("dspr[U]", n * (n + 1) / 2, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dspr("U", n, alpha, x, 1, a.clone) + runBLASBenchmark("dnrm2", n) { impl => + impl.dnrm2(n, x, 1) } + } - benchmark.addCase("java") { _ => - javaBLAS.dspr("U", n, alpha, x, 1, a.clone) - } + runBenchmark("snrm2") { + val n = 1e8.toInt + val x = Array.fill(n) { rnd.nextFloat } - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dspr("U", n, alpha, x, 1, a.clone) - } + runBLASBenchmark("snrm2", n) { impl => + impl.snrm2(n, x, 1) } - - benchmark.run() } - runBenchmark("dsyr[U]") { - val n = 1e3.toInt + runBenchmark("dscal") { + val n = 1e8.toInt val alpha = 
rnd.nextDouble val x = Array.fill(n) { rnd.nextDouble } - val a = Array.fill(n * n) { rnd.nextDouble } - - val benchmark = new Benchmark("dsyr[U]", n * (n + 1) / 2, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - benchmark.addCase("f2j") { _ => - f2jBLAS.dsyr("U", n, alpha, x, 1, a.clone, n) + runBLASBenchmark("dscal", n) { impl => + impl.dscal(n, alpha, x.clone, 1) } + } - benchmark.addCase("java") { _ => - javaBLAS.dsyr("U", n, alpha, x, 1, a.clone, n) - } + runBenchmark("sscal") { + val n = 1e8.toInt + val alpha = rnd.nextFloat + val x = Array.fill(n) { rnd.nextFloat } - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dsyr("U", n, alpha, x, 1, a.clone, n) - } + runBLASBenchmark("sscal", n) { impl => + impl.sscal(n, alpha, x.clone, 1) } - - benchmark.run() } runBenchmark("dgemv[N]") { - val m = 1e3.toInt - val n = 1e3.toInt + val m = 1e4.toInt + val n = 1e4.toInt val alpha = rnd.nextDouble val a = Array.fill(m * n) { rnd.nextDouble } val lda = m @@ -309,31 +183,14 @@ object BLASBenchmark extends BenchmarkBase { val beta = rnd.nextDouble val y = Array.fill(m) { rnd.nextDouble } - val benchmark = new Benchmark("dgemv[N]", m * n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.dgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) + runBLASBenchmark("dgemv[N]", m * n) { impl => + impl.dgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - } - - benchmark.run() } runBenchmark("dgemv[T]") { - val m = 1e3.toInt - val n = 1e3.toInt + val m = 1e4.toInt + val n = 1e4.toInt val alpha = rnd.nextDouble val a = Array.fill(m * n) { rnd.nextDouble } val lda = m @@ -341,31 +198,14 @@ object BLASBenchmark extends BenchmarkBase { val beta = rnd.nextDouble val y = Array.fill(n) { rnd.nextDouble } - val benchmark = new Benchmark("dgemv[T]", m * n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.dgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } + runBLASBenchmark("dgemv[T]", m * n) { impl => + impl.dgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) } - - benchmark.run() } runBenchmark("sgemv[N]") { - val m = 1e3.toInt - val n = 1e3.toInt + val m = 1e4.toInt + val n = 1e4.toInt val alpha = rnd.nextFloat val a = Array.fill(m * n) { rnd.nextFloat } val lda = m @@ -373,31 +213,14 @@ object BLASBenchmark extends BenchmarkBase { val beta = rnd.nextFloat val y = Array.fill(m) { rnd.nextFloat } - val benchmark = new Benchmark("sgemv[N]", m * n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - - benchmark.addCase("java") { _ => - javaBLAS.sgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } + 
runBLASBenchmark("sgemv[N]", m * n) { impl => + impl.sgemv("N", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) } - - benchmark.run() } runBenchmark("sgemv[T]") { - val m = 1e3.toInt - val n = 1e3.toInt + val m = 1e4.toInt + val n = 1e4.toInt val alpha = rnd.nextFloat val a = Array.fill(m * n) { rnd.nextFloat } val lda = m @@ -405,26 +228,59 @@ object BLASBenchmark extends BenchmarkBase { val beta = rnd.nextFloat val y = Array.fill(n) { rnd.nextFloat } - val benchmark = new Benchmark("sgemv[T]", m * n, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) + runBLASBenchmark("sgemv[T]", m * n) { impl => + impl.sgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) + } + } - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) + runBenchmark("dger") { + val m = 1e4.toInt + val n = 1e4.toInt + val alpha = rnd.nextDouble + val a = Array.fill(m * n) { rnd.nextDouble } + val lda = m + val x = Array.fill(n) { rnd.nextDouble } + val beta = rnd.nextDouble + val y = Array.fill(m) { rnd.nextDouble } + + runBLASBenchmark("dger", m * n) { impl => + impl.dger(m, n, alpha, x, 1, y, 1, a.clone(), m) } + } - benchmark.addCase("java") { _ => - javaBLAS.sgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) + runBenchmark("dspmv[U]") { + val n = 1e4.toInt + val alpha = rnd.nextDouble + val a = Array.fill(n * (n + 1) / 2) { rnd.nextDouble } + val x = Array.fill(n) { rnd.nextDouble } + val beta = rnd.nextDouble + val y = Array.fill(n) { rnd.nextDouble } + + runBLASBenchmark("dspmv[U]", n * (n + 1) / 2) { impl => + impl.dspmv("U", n, alpha, a, x, 1, beta, y.clone, 1) } + } - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sgemv("T", m, n, alpha, a, lda, x, 1, beta, y.clone, 1) - } + runBenchmark("dspr[U]") { + val n = 1e4.toInt + val alpha = rnd.nextDouble + val x = Array.fill(n) { rnd.nextDouble } + val a = Array.fill(n * (n + 1) / 2) { rnd.nextDouble } + + runBLASBenchmark("dspr[U]", n * (n + 1) / 2) { impl => + impl.dspr("U", n, alpha, x, 1, a.clone) } + } - benchmark.run() + runBenchmark("dsyr[U]") { + val n = 1e4.toInt + val alpha = rnd.nextDouble + val x = Array.fill(n) { rnd.nextDouble } + val a = Array.fill(n * n) { rnd.nextDouble } + + runBLASBenchmark("dsyr[U]", n * (n + 1) / 2) { impl => + impl.dsyr("U", n, alpha, x, 1, a.clone, n) + } } runBenchmark("dgemm[N,N]") { @@ -440,26 +296,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextDouble } var ldc = m - val benchmark = new Benchmark("dgemm[N,N]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - benchmark.addCase("java") { _ => - javaBLAS.dgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } + runBLASBenchmark("dgemm[N,N]", m * n * k) { impl => + impl.dgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.run() } runBenchmark("dgemm[N,T]") { @@ -475,26 +314,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextDouble } var ldc = m - val benchmark = new Benchmark("dgemm[N,T]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemm("N", "T", m, n, 
k, alpha, a, lda, b, ldb, beta, c.clone, ldc) + runBLASBenchmark("dgemm[N,T]", m * n * k) { impl => + impl.dgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.addCase("java") { _ => - javaBLAS.dgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - } - - benchmark.run() } runBenchmark("dgemm[T,N]") { @@ -510,26 +332,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextDouble } var ldc = m - val benchmark = new Benchmark("dgemm[T,N]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - benchmark.addCase("java") { _ => - javaBLAS.dgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } + runBLASBenchmark("dgemm[T,N]", m * n * k) { impl => + impl.dgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.run() } runBenchmark("dgemm[T,T]") { @@ -545,26 +350,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextDouble } var ldc = m - val benchmark = new Benchmark("dgemm[T,T]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.dgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) + runBLASBenchmark("dgemm[T,T]", m * n * k) { impl => + impl.dgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.addCase("java") { _ => - javaBLAS.dgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.dgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - } - - benchmark.run() } runBenchmark("sgemm[N,N]") { @@ -580,26 +368,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextFloat } var ldc = m - val benchmark = new Benchmark("sgemm[N,N]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - benchmark.addCase("java") { _ => - javaBLAS.sgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } + runBLASBenchmark("sgemm[N,N]", m * n * k) { impl => + impl.sgemm("N", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.run() } runBenchmark("sgemm[N,T]") { @@ -615,26 +386,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextFloat } var ldc = m - val benchmark = new Benchmark("sgemm[N,T]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - benchmark.addCase("java") { _ => - javaBLAS.sgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - 
benchmark.addCase("native") { _ => - nativeBLAS.sgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } + runBLASBenchmark("sgemm[N,T]", m * n * k) { impl => + impl.sgemm("N", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.run() } runBenchmark("sgemm[T,N]") { @@ -650,26 +404,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextFloat } var ldc = m - val benchmark = new Benchmark("sgemm[T,N]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) + runBLASBenchmark("sgemm[T,N]", m * n * k) { impl => + impl.sgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - benchmark.addCase("java") { _ => - javaBLAS.sgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sgemm("T", "N", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - } - - benchmark.run() } runBenchmark("sgemm[T,T]") { @@ -685,26 +422,9 @@ object BLASBenchmark extends BenchmarkBase { val c = Array.fill(m * n) { rnd.nextFloat } var ldc = m - val benchmark = new Benchmark("sgemm[T,T]", m * n * k, iters, - warmupTime = 30.seconds, - minTime = 30.seconds, - output = output) - - benchmark.addCase("f2j") { _ => - f2jBLAS.sgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - - benchmark.addCase("java") { _ => - javaBLAS.sgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) + runBLASBenchmark("sgemm[T,T]", m * n * k) { impl => + impl.sgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) } - - if (nativeBLAS != javaBLAS) { - benchmark.addCase("native") { _ => - nativeBLAS.sgemm("T", "T", m, n, k, alpha, a, lda, b, ldb, beta, c.clone, ldc) - } - } - - benchmark.run() } } } From 034bef6a167e83c87023bec9945534e17ebf5250 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Tue, 27 Apr 2021 22:17:02 +0200 Subject: [PATCH 02/15] Bump to dev.ludovic.netlib:2-SNAPSHOT --- pom.xml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 827b405df3677..b6a0d2b40d9ff 100644 --- a/pom.xml +++ b/pom.xml @@ -172,7 +172,7 @@ 2.12.2 1.1.8.4 1.1.2 - 1.3.2 + 2-SNAPSHOT 1.15 1.20 2.8.0 @@ -298,6 +298,12 @@ false + + + + Sonatype OSS Snapshots + https://oss.sonatype.org/content/repositories/snapshots/ + From 2dbf90bd3853eeb7265681973f9cf2397886dca7 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Wed, 28 Apr 2021 00:54:28 +0200 Subject: [PATCH 03/15] Try to make it build on CI --- pom.xml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index b6a0d2b40d9ff..755bfbac43f7a 100644 --- a/pom.xml +++ b/pom.xml @@ -301,8 +301,14 @@ - Sonatype OSS Snapshots + sonatype-oss-snapshots https://oss.sonatype.org/content/repositories/snapshots/ + + true + + + true + From 6909f0a26a86dbf818745f381888498d307c2e8c Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Wed, 28 Apr 2021 01:03:37 +0200 Subject: [PATCH 04/15] Try to make it build on CI (take 2) --- pom.xml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pom.xml b/pom.xml index 755bfbac43f7a..1ea69e135d67d 100644 --- a/pom.xml +++ b/pom.xml @@ -337,6 +337,17 @@ false + + + sonatype-oss-snapshots + https://oss.sonatype.org/content/repositories/snapshots/ + + true + + + true + + + + netlib-lgpl + From 
9dab1f29333dc8e3420d537a01e7b51257f0e4fd Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Sun, 2 May 2021 12:48:14 +0200 Subject: [PATCH 08/15] Disable caching on CI because of -SNAPSHOT repository --- .github/workflows/build_and_test.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 47c7261df7150..2dffaf019f88e 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -105,13 +105,13 @@ jobs: key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} restore-keys: | build- - - name: Cache Coursier local repository - uses: actions/cache@v2 - with: - path: ~/.cache/coursier - key: ${{ matrix.java }}-${{ matrix.hadoop }}-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} - restore-keys: | - ${{ matrix.java }}-${{ matrix.hadoop }}-coursier- + # - name: Cache Coursier local repository + # uses: actions/cache@v2 + # with: + # path: ~/.cache/coursier + # key: ${{ matrix.java }}-${{ matrix.hadoop }}-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + # restore-keys: | + # ${{ matrix.java }}-${{ matrix.hadoop }}-coursier- - name: Install Java ${{ matrix.java }} uses: actions/setup-java@v1 with: From 8961a6f8d4295f6ff3801075d700fca18c26f399 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Mon, 3 May 2021 10:34:56 +0200 Subject: [PATCH 09/15] Update docs --- docs/ml-guide.md | 7 +++---- docs/ml-linalg-guide.md | 34 +++++++++++----------------------- 2 files changed, 14 insertions(+), 27 deletions(-) diff --git a/docs/ml-guide.md b/docs/ml-guide.md index 1b4a3e4eee557..3202647240b92 100644 --- a/docs/ml-guide.md +++ b/docs/ml-guide.md @@ -62,12 +62,11 @@ The primary Machine Learning API for Spark is now the [DataFrame](sql-programmin # Dependencies -MLlib uses linear algebra packages [Breeze](http://www.scalanlp.org/) and [netlib-java](https://github.com/fommil/netlib-java) for optimised numerical processing[^1]. Those packages may call native acceleration libraries such as [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) or [OpenBLAS](http://www.openblas.net) if they are available as system libraries or in runtime library paths. +MLlib uses linear algebra packages [Breeze](http://www.scalanlp.org/), [dev.ludovic.netlib](https://github.com/luhenry/netlib), and [netlib-java](https://github.com/fommil/netlib-java) for optimised numerical processing[^1]. Those packages may call native acceleration libraries such as [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) or [OpenBLAS](http://www.openblas.net) if they are available as system libraries or in runtime library paths. -Due to differing OSS licenses, `netlib-java`'s native proxies can't be distributed with Spark. See [MLlib Linear Algebra Acceleration Guide](ml-linalg-guide.html) for how to enable accelerated linear algebra processing. If accelerated native libraries are not enabled, you will see a warning message like below and a pure JVM implementation will be used instead: +However, native acceleration libraries can't be distributed with Spark. See [MLlib Linear Algebra Acceleration Guide](ml-linalg-guide.html) for how to enable accelerated linear algebra processing. 
If accelerated native libraries are not enabled, you will see a warning message like below and a pure JVM implementation will be used instead: ``` -WARN BLAS: Failed to load implementation from:com.github.fommil.netlib.NativeSystemBLAS -WARN BLAS: Failed to load implementation from:com.github.fommil.netlib.NativeRefBLAS +WARN BLAS: Failed to load implementation from:dev.ludovic.netlib.blas.JNIBLAS ``` To use MLlib in Python, you will need [NumPy](http://www.numpy.org) version 1.4 or newer. diff --git a/docs/ml-linalg-guide.md b/docs/ml-linalg-guide.md index 719554af5a2d2..76d36a00fdfe3 100644 --- a/docs/ml-linalg-guide.md +++ b/docs/ml-linalg-guide.md @@ -21,29 +21,15 @@ license: | This guide provides necessary information to enable accelerated linear algebra processing for Spark MLlib. -Spark MLlib defines Vector and Matrix as basic data types for machine learning algorithms. On top of them, [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) operations are implemented and supported by [netlib-java](https://github.com/fommil/netlib-Java) (the algorithms may call [Breeze](https://github.com/scalanlp/breeze) and it will in turn call `netlib-java`). `netlib-java` can use optimized native linear algebra libraries (refered to as "native libraries" or "BLAS libraries" hereafter) for faster numerical processing. [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) and [OpenBLAS](http://www.openblas.net) are two popular ones. +Spark MLlib defines Vector and Matrix as basic data types for machine learning algorithms. On top of them, [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) operations are implemented and supported by [dev.ludovic.netlib](https://github.com/luhenry/netlib) (the algorithms may call [Breeze](https://github.com/scalanlp/breeze) and it will in turn call [netlib-java](https://github.com/fommil/netlib-Java)). `dev.ludovic.netlib` can use optimized native linear algebra libraries (refered to as "native libraries" or "BLAS libraries" hereafter) for faster numerical processing. [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) and [OpenBLAS](http://www.openblas.net) are two popular ones. -However due to license differences, the official released Spark binaries by default don't contain native libraries support for `netlib-java`. +However due to license differences, the official released Spark binaries by default don't contain these native libraries. -The following sections describe how to enable `netlib-java` with native libraries support for Spark MLlib and how to install native libraries and configure them properly. - -## Enable `netlib-java` with native library proxies - -`netlib-java` depends on `libgfortran`. It requires GFORTRAN 1.4 or above. This can be obtained by installing `libgfortran` package. After installation, the following command can be used to verify if it is installed properly. -``` -strings /path/to/libgfortran.so.3.0.0 | grep GFORTRAN_1.4 -``` - -To build Spark with `netlib-java` native library proxies, you need to add `-Pnetlib-lgpl` to Maven build command line. For example: -``` -$SPARK_SOURCE_HOME/build/mvn -Pnetlib-lgpl -DskipTests -Pyarn -Phadoop-2.7 clean package -``` - -If you only want to enable it in your project, include `com.github.fommil.netlib:all:1.1.2` as a dependency of your project. 
+The following sections describe how to install native libraries, configure them properly, and how to point `dev.ludovic.netlib` to these native libraries. ## Install native linear algebra libraries -Intel MKL and OpenBLAS are two popular native linear algebra libraries. You can choose one of them based on your preference. We provide basic instructions as below. You can refer to [netlib-java documentation](https://github.com/fommil/netlib-java) for more advanced installation instructions. +Intel MKL and OpenBLAS are two popular native linear algebra libraries. You can choose one of them based on your preference. We provide basic instructions as below. ### Intel MKL @@ -72,14 +58,16 @@ sudo yum install openblas To verify native libraries are properly loaded, start `spark-shell` and run the following code: ``` -scala> import com.github.fommil.netlib.BLAS; -scala> System.out.println(BLAS.getInstance().getClass().getName()); +scala> import dev.ludovic.netlib.NativeBLAS +scala> NativeBLAS.getInstance() ``` -If they are correctly loaded, it should print `com.github.fommil.netlib.NativeSystemBLAS`. Otherwise the warnings should be printed: +If they are correctly loaded, it should print `dev.ludovic.netlib.NativeBLAS = dev.ludovic.netlib.blas.JNIBLAS@...`. Otherwise the warnings should be printed: ``` -WARN BLAS: Failed to load implementation from:com.github.fommil.netlib.NativeSystemBLAS -WARN BLAS: Failed to load implementation from:com.github.fommil.netlib.NativeRefBLAS +WARN NativeBLAS: Failed to load implementation from:dev.ludovic.netlib.blas.JNIBLAS +java.lang.RuntimeException: Unable to load native implementation + at dev.ludovic.netlib.NativeBLAS.getInstance(NativeBLAS.java:44) + ... ``` If native libraries are not properly configured in the system, the Java implementation (javaBLAS) will be used as fallback option. From 46c32410e1e0a1a01311ad6edc329c0279f8ef6f Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Mon, 3 May 2021 11:30:42 +0200 Subject: [PATCH 10/15] fixup! Update docs --- docs/ml-linalg-guide.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/ml-linalg-guide.md b/docs/ml-linalg-guide.md index 76d36a00fdfe3..1e3d9ccbc82ea 100644 --- a/docs/ml-linalg-guide.md +++ b/docs/ml-linalg-guide.md @@ -21,9 +21,9 @@ license: | This guide provides necessary information to enable accelerated linear algebra processing for Spark MLlib. -Spark MLlib defines Vector and Matrix as basic data types for machine learning algorithms. On top of them, [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) operations are implemented and supported by [dev.ludovic.netlib](https://github.com/luhenry/netlib) (the algorithms may call [Breeze](https://github.com/scalanlp/breeze) and it will in turn call [netlib-java](https://github.com/fommil/netlib-Java)). `dev.ludovic.netlib` can use optimized native linear algebra libraries (refered to as "native libraries" or "BLAS libraries" hereafter) for faster numerical processing. [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) and [OpenBLAS](http://www.openblas.net) are two popular ones. +Spark MLlib defines Vector and Matrix as basic data types for machine learning algorithms. 
On top of them, [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) operations are implemented and supported by [dev.ludovic.netlib](https://github.com/luhenry/netlib) (the algorithms may also call [Breeze](https://github.com/scalanlp/breeze)). `dev.ludovic.netlib` can use optimized native linear algebra libraries (refered to as "native libraries" or "BLAS libraries" hereafter) for faster numerical processing. [Intel MKL](https://software.intel.com/content/www/us/en/develop/tools/math-kernel-library.html) and [OpenBLAS](http://www.openblas.net) are two popular ones. -However due to license differences, the official released Spark binaries by default don't contain these native libraries. +The official released Spark binaries don't contain these native libraries. The following sections describe how to install native libraries, configure them properly, and how to point `dev.ludovic.netlib` to these native libraries. @@ -70,6 +70,8 @@ java.lang.RuntimeException: Unable to load native implementation ... ``` +You can also point `dev.ludovic.netlib` to specific libraries names and paths. For example, `-Ddev.ludovic.netlib.blas.nativeLib=libmkl_rt.so` or `-Ddev.ludovic.netlib.blas.nativeLibPath=$MKLROOT/lib/intel64/libmkl_rt.so` for Intel MKL. You have similar parameters for LAPACK and ARPACK: `-Ddev.ludovic.netlib.lapack.nativeLib=...`, `-Ddev.ludovic.netlib.lapack.nativeLibPath=...`, `-Ddev.ludovic.netlib.arpack.nativeLib=...`, and `-Ddev.ludovic.netlib.arpack.nativeLibPath=...`. + If native libraries are not properly configured in the system, the Java implementation (javaBLAS) will be used as fallback option. ## Spark Configuration From 68b5425311e0c645ee7144f82c14185c9f779b20 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Mon, 3 May 2021 13:40:21 +0200 Subject: [PATCH 11/15] Bump to dev.ludovic.netlib:2.0.0 --- .github/workflows/build_and_test.yml | 14 +++++++------- pom.xml | 25 +------------------------ project/SparkBuild.scala | 2 -- 3 files changed, 8 insertions(+), 33 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 2dffaf019f88e..47c7261df7150 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -105,13 +105,13 @@ jobs: key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} restore-keys: | build- - # - name: Cache Coursier local repository - # uses: actions/cache@v2 - # with: - # path: ~/.cache/coursier - # key: ${{ matrix.java }}-${{ matrix.hadoop }}-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} - # restore-keys: | - # ${{ matrix.java }}-${{ matrix.hadoop }}-coursier- + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: ${{ matrix.java }}-${{ matrix.hadoop }}-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + ${{ matrix.java }}-${{ matrix.hadoop }}-coursier- - name: Install Java ${{ matrix.java }} uses: actions/setup-java@v1 with: diff --git a/pom.xml b/pom.xml index ac2fbe912df0a..429fbcbc74472 100644 --- a/pom.xml +++ b/pom.xml @@ -172,7 +172,7 @@ 2.12.2 1.1.8.4 1.1.2 - 2-SNAPSHOT + 2.0.0 1.15 1.20 2.8.0 @@ -298,18 +298,6 @@ false - - - - sonatype-oss-snapshots - https://oss.sonatype.org/content/repositories/snapshots/ - - true - - - true - - @@ -337,17 +325,6 @@ false - - - sonatype-oss-snapshots - 
https://oss.sonatype.org/content/repositories/snapshots/ - - true - - - true - - netlib-lgpl + + + com.github.fommil.netlib + all + ${netlib.java.version} + pom + + From cd9b5c88f0aa9365e4fcabb6d36ded7488a8442e Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Sun, 9 May 2021 10:11:44 +0200 Subject: [PATCH 14/15] Bump to dev.ludovic.netlib:2.1.0 --- dev/deps/spark-deps-hadoop-2.7-hive-2.3 | 6 +++--- dev/deps/spark-deps-hadoop-3.2-hive-2.3 | 6 +++--- pom.xml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 index 335d8d780d4f4..f2aa3e5b571f2 100644 --- a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 @@ -15,7 +15,7 @@ apacheds-i18n/2.0.0-M15//apacheds-i18n-2.0.0-M15.jar apacheds-kerberos-codec/2.0.0-M15//apacheds-kerberos-codec-2.0.0-M15.jar api-asn1-api/1.0.0-M20//api-asn1-api-1.0.0-M20.jar api-util/1.0.0-M20//api-util-1.0.0-M20.jar -arpack/2.0.0//arpack-2.0.0.jar +arpack/2.1.0//arpack-2.1.0.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar arrow-format/2.0.0//arrow-format-2.0.0.jar arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar @@ -26,7 +26,7 @@ automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.10.2//avro-ipc-1.10.2.jar avro-mapred/1.10.2//avro-mapred-1.10.2.jar avro/1.10.2//avro-1.10.2.jar -blas/2.0.0//blas-2.0.0.jar +blas/2.1.0//blas-2.1.0.jar bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar breeze_2.12/1.0//breeze_2.12-1.0.jar @@ -174,7 +174,7 @@ kubernetes-model-policy/5.3.1//kubernetes-model-policy-5.3.1.jar kubernetes-model-rbac/5.3.1//kubernetes-model-rbac-5.3.1.jar kubernetes-model-scheduling/5.3.1//kubernetes-model-scheduling-5.3.1.jar kubernetes-model-storageclass/5.3.1//kubernetes-model-storageclass-5.3.1.jar -lapack/2.0.0//lapack-2.0.0.jar +lapack/2.1.0//lapack-2.1.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar diff --git a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 index b16976f1b7589..318a3d2d7a5a0 100644 --- a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 @@ -10,7 +10,7 @@ annotations/17.0.0//annotations-17.0.0.jar antlr-runtime/3.5.2//antlr-runtime-3.5.2.jar antlr4-runtime/4.8-1//antlr4-runtime-4.8-1.jar aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar -arpack/2.0.0//arpack-2.0.0.jar +arpack/2.1.0//arpack-2.1.0.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar arrow-format/2.0.0//arrow-format-2.0.0.jar arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar @@ -21,7 +21,7 @@ automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.10.2//avro-ipc-1.10.2.jar avro-mapred/1.10.2//avro-mapred-1.10.2.jar avro/1.10.2//avro-1.10.2.jar -blas/2.0.0//blas-2.0.0.jar +blas/2.1.0//blas-2.1.0.jar bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar breeze_2.12/1.0//breeze_2.12-1.0.jar @@ -145,7 +145,7 @@ kubernetes-model-policy/5.3.1//kubernetes-model-policy-5.3.1.jar kubernetes-model-rbac/5.3.1//kubernetes-model-rbac-5.3.1.jar kubernetes-model-scheduling/5.3.1//kubernetes-model-scheduling-5.3.1.jar kubernetes-model-storageclass/5.3.1//kubernetes-model-storageclass-5.3.1.jar -lapack/2.0.0//lapack-2.0.0.jar +lapack/2.1.0//lapack-2.1.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar diff --git a/pom.xml 
b/pom.xml index 2a0330252fede..d6100bd923b09 100644 --- a/pom.xml +++ b/pom.xml @@ -172,7 +172,7 @@ 2.12.2 1.1.8.4 1.1.2 - 2.0.0 + 2.1.0 1.15 1.20 2.8.0 From 420802efbf9aabe3d3f709ec21102510b51dcfc0 Mon Sep 17 00:00:00 2001 From: Ludovic Henry Date: Sun, 9 May 2021 18:13:21 +0200 Subject: [PATCH 15/15] Bump to dev.ludovic.netlib:2.2.0 --- dev/deps/spark-deps-hadoop-2.7-hive-2.3 | 6 +++--- dev/deps/spark-deps-hadoop-3.2-hive-2.3 | 6 +++--- pom.xml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 index f2aa3e5b571f2..c496c6a5b9707 100644 --- a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 @@ -15,7 +15,7 @@ apacheds-i18n/2.0.0-M15//apacheds-i18n-2.0.0-M15.jar apacheds-kerberos-codec/2.0.0-M15//apacheds-kerberos-codec-2.0.0-M15.jar api-asn1-api/1.0.0-M20//api-asn1-api-1.0.0-M20.jar api-util/1.0.0-M20//api-util-1.0.0-M20.jar -arpack/2.1.0//arpack-2.1.0.jar +arpack/2.2.0//arpack-2.2.0.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar arrow-format/2.0.0//arrow-format-2.0.0.jar arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar @@ -26,7 +26,7 @@ automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.10.2//avro-ipc-1.10.2.jar avro-mapred/1.10.2//avro-mapred-1.10.2.jar avro/1.10.2//avro-1.10.2.jar -blas/2.1.0//blas-2.1.0.jar +blas/2.2.0//blas-2.2.0.jar bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar breeze_2.12/1.0//breeze_2.12-1.0.jar @@ -174,7 +174,7 @@ kubernetes-model-policy/5.3.1//kubernetes-model-policy-5.3.1.jar kubernetes-model-rbac/5.3.1//kubernetes-model-rbac-5.3.1.jar kubernetes-model-scheduling/5.3.1//kubernetes-model-scheduling-5.3.1.jar kubernetes-model-storageclass/5.3.1//kubernetes-model-storageclass-5.3.1.jar -lapack/2.1.0//lapack-2.1.0.jar +lapack/2.2.0//lapack-2.2.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar diff --git a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 index 318a3d2d7a5a0..d17a768d415b7 100644 --- a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 @@ -10,7 +10,7 @@ annotations/17.0.0//annotations-17.0.0.jar antlr-runtime/3.5.2//antlr-runtime-3.5.2.jar antlr4-runtime/4.8-1//antlr4-runtime-4.8-1.jar aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar -arpack/2.1.0//arpack-2.1.0.jar +arpack/2.2.0//arpack-2.2.0.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar arrow-format/2.0.0//arrow-format-2.0.0.jar arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar @@ -21,7 +21,7 @@ automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.10.2//avro-ipc-1.10.2.jar avro-mapred/1.10.2//avro-mapred-1.10.2.jar avro/1.10.2//avro-1.10.2.jar -blas/2.1.0//blas-2.1.0.jar +blas/2.2.0//blas-2.2.0.jar bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar breeze_2.12/1.0//breeze_2.12-1.0.jar @@ -145,7 +145,7 @@ kubernetes-model-policy/5.3.1//kubernetes-model-policy-5.3.1.jar kubernetes-model-rbac/5.3.1//kubernetes-model-rbac-5.3.1.jar kubernetes-model-scheduling/5.3.1//kubernetes-model-scheduling-5.3.1.jar kubernetes-model-storageclass/5.3.1//kubernetes-model-storageclass-5.3.1.jar -lapack/2.1.0//lapack-2.1.0.jar +lapack/2.2.0//lapack-2.2.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar diff --git a/pom.xml b/pom.xml index 
d6100bd923b09..6ced603d0c7f2 100644 --- a/pom.xml +++ b/pom.xml @@ -172,7 +172,7 @@ 2.12.2 1.1.8.4 1.1.2 - 2.1.0 + 2.2.0 1.15 1.20 2.8.0
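
The doc changes in PATCH 09/10 above describe how to check which BLAS backend `dev.ludovic.netlib` loads at runtime. Below is a minimal standalone sketch of that check, assuming the 2.2.0 artifacts pinned by PATCH 15/15 are on the classpath: `NativeBLAS.getInstance()` and the `RuntimeException` fallback behaviour are taken from the updated `docs/ml-linalg-guide.md`, while the object name, the `Try` wrapper, and the OpenBLAS path in the comment are illustrative assumptions, not part of the patches.

```
import dev.ludovic.netlib.NativeBLAS

import scala.util.{Failure, Success, Try}

object CheckNetlibBLAS {
  def main(args: Array[String]): Unit = {
    // Optionally point the loader at a specific native library, e.g.
    //   -Ddev.ludovic.netlib.blas.nativeLibPath=/opt/OpenBLAS/lib/libopenblas.so
    // (illustrative path; the property name comes from the guide above).
    Option(System.getProperty("dev.ludovic.netlib.blas.nativeLibPath"))
      .foreach(p => println(s"nativeLibPath = $p"))

    // Per the updated ml-linalg-guide.md, NativeBLAS.getInstance() throws a
    // RuntimeException when no native implementation can be loaded.
    Try(NativeBLAS.getInstance()) match {
      case Success(blas) =>
        println(s"Native BLAS in use: ${blas.getClass.getName}")
      case Failure(e) =>
        println(s"Native BLAS unavailable (${e.getMessage}); " +
          "the pure-Java implementation would be used as fallback")
    }
  }
}
```

With a correctly configured native library this should print a `dev.ludovic.netlib.blas.JNIBLAS@...` instance, matching the expected output documented in the guide; otherwise it prints the fallback message instead of the warning shown there.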