From a6843c08070b3454330242158fb7e8b6f620b807 Mon Sep 17 00:00:00 2001 From: driazati Date: Mon, 23 May 2022 11:06:27 -0700 Subject: [PATCH] [ci] Add more shards This adds a bunch more CPU shards and moves everything to CPU-SMALL. Some Java limitations required splitting up the logic in the templates a bit as well. --- Jenkinsfile | 2273 ++++++++++++++++++++++++++++----------- jenkins/Build.groovy.j2 | 15 +- jenkins/Jenkinsfile.j2 | 1 + jenkins/Test.groovy.j2 | 342 +++--- jenkins/macros.j2 | 44 +- 5 files changed, 1859 insertions(+), 816 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0b64f9306844..1e207f09059b 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -45,7 +45,7 @@ // 'python3 jenkins/generate.py' // Note: This timestamp is here to ensure that updates to the Jenkinsfile are // always rebased on main before merging: -// Generated at 2022-05-20T13:24:01.371704 +// Generated at 2022-05-23T16:38:45.963400 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils // NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. --> @@ -484,6 +484,13 @@ def add_microtvm_permissions() { ) } +def add_hexagon_permissions() { + sh( + script: 'find build/hexagon_api_output -type f | xargs chmod +x', + label: 'Add execute permissions for hexagon files', + ) +} + def build() { stage('Build') { environment { @@ -703,6 +710,10 @@ stage('Build') { label: 'Create Hexagon cmake config', ) make(ci_hexagon, 'build', '-j2') + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", + label: 'Build Hexagon API', + ) sh( script: """ set -eux @@ -712,6 +723,7 @@ stage('Build') { aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/hexagon/build/libtvm_runtime.so md5sum build/config.cmake aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/hexagon/build/config.cmake + aws s3 cp --no-progress build/hexagon_api_output s3://${s3_prefix}/hexagon/build/hexagon_api_output --recursive """, label: 'Upload artifacts to S3', ) @@ -725,24 +737,24 @@ stage('Build') { ) } } -def test() { -stage('Test') { - environment { - SKIP_SLOW_TESTS = "${skip_slow_tests}" - } - parallel( - 'unittest: GPU 1 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=0'], { - sh( + +// We have to do this whacky split of the code from where it's used since the +// JVM limits method length to 64k and we easily exceed that with all this +// autogenerated code. This makes it so each test step is in its own method so +// that each individual method isn't too big. 
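// A minimal sketch of the pattern used below, with hypothetical names and not
// part of this patch (the real methods and stage names come from the Jinja2
// templates): the JVM caps a single method at 64KB of bytecode, so instead of
// inlining every parallel branch inside test(), each shard body is hoisted
// into its own method and test() only dispatches to them.
def shard_run_example_1_of_2() {
  // one shard's body: download build artifacts, set TVM_SHARD_INDEX=0, run tests
}

def shard_run_example_2_of_2() {
  // same body with TVM_SHARD_INDEX=1
}

def example_test() {
  stage('Test') {
    parallel(
      'example 1 of 2': {
        shard_run_example_1_of_2()
      },
      'example 2 of 2': {
        shard_run_example_2_of_2()
      },
    )
  }
}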
+ +def shard_run_unittest_GPU_1_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/gpu2/build/libtvm.so build/libtvm.so @@ -757,9 +769,9 @@ stage('Test') { label: 'Download artifacts from S3', ) - cpp_unittest(ci_gpu) + cpp_unittest(ci_gpu) - sh( + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so @@ -774,39 +786,40 @@ stage('Test') { label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) - cpp_unittest(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", - label: 'Run Python GPU unit tests', - ) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", - label: 'Run Python GPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_gpu) + cpp_unittest(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('unittest: GPU 1 of 2') } - }, - 'unittest: GPU 2 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('unittest: GPU 1 of 3') + } +} + +def shard_run_unittest_GPU_2_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=1'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so @@ -821,86 +834,91 @@ stage('Test') { label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", - label: 'Run Java unit tests', - ) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", - label: 'Run Python GPU unit tests', - ) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", - label: 'Run Python GPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", + label: 'Run Java unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('unittest: GPU 2 of 2') } - }, - 'integration: CPU 1 of 2': { - if (!skip_ci && is_docs_only_build != 
1) { - node('CPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=cpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('unittest: GPU 2 of 3') + } +} + +def shard_run_unittest_GPU_3_of_3() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=3', + 'TVM_SHARD_INDEX=2'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so - md5sum build/libvta_tsim.so - aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('integration: CPU 1 of 2') } - }, - 'integration: CPU 2 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=cpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('unittest: GPU 3 of 3') + } +} + + +def shard_run_integration_CPU_1_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so @@ -917,31 +935,35 @@ stage('Test') { label: 'Download artifacts from S3', ) - ci_setup(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU 
integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('integration: CPU 2 of 2') } - }, - 'unittest: CPU': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") { + } else { + Utils.markStageSkippedForConditional('integration: CPU 1 of 6') + } +} + +def shard_run_integration_CPU_2_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - withEnv(['PLATFORM=cpu'], { - sh( + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=1'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so @@ -958,259 +980,443 @@ stage('Test') { label: 'Download artifacts from S3', ) - ci_setup(ci_cpu) - cpp_unittest(ci_cpu) - python_unittest(ci_cpu) - fsim_test(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh", - label: 'Run VTA tests in TSIM', - ) - }) - } finally { - junit 'build/pytest-results/*.xml' - } + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('unittest: CPU') } - }, - 'python: i386 1 of 3': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=i386', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('integration: CPU 2 of 6') + } +} + +def shard_run_integration_CPU_3_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=2'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so + md5sum build/libvta_tsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_i386) - cpp_unittest(ci_i386) - python_unittest(ci_i386) - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", - label: 'Run i386 integration tests', - ) - fsim_test(ci_i386) - }) - } - } finally { - junit 
'build/pytest-results/*.xml' + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('python: i386 1 of 3') } - }, - 'python: i386 2 of 3': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=i386', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('integration: CPU 3 of 6') + } +} + +def shard_run_integration_CPU_4_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=3'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so + md5sum build/libvta_tsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_i386) - python_unittest(ci_i386) - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", - label: 'Run i386 integration tests', - ) - fsim_test(ci_i386) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('python: i386 2 of 3') } - }, - 'python: i386 3 of 3': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=i386', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=2'], { - sh( + } else { + Utils.markStageSkippedForConditional('integration: CPU 4 of 6') + } +} + +def shard_run_integration_CPU_5_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=4'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so + 
md5sum build/libvta_tsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_i386) - python_unittest(ci_i386) - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", - label: 'Run i386 integration tests', - ) - fsim_test(ci_i386) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('python: i386 3 of 3') } - }, - 'test: Hexagon 1 of 4': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=hexagon', - 'TVM_NUM_SHARDS=4', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('integration: CPU 5 of 6') + } +} + +def shard_run_integration_CPU_6_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=cpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=5'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so + md5sum build/libvta_tsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_hexagon) - cpp_unittest(ci_hexagon) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) } + } finally { + junit 
'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('test: Hexagon 1 of 4') } - }, - 'test: Hexagon 2 of 4': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=hexagon', - 'TVM_NUM_SHARDS=4', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('integration: CPU 6 of 6') + } +} + + +def shard_run_python_i386_1_of_5() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TVM_NUM_SHARDS=5', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_hexagon) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_i386) + cpp_unittest(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('test: Hexagon 2 of 4') } - }, - 'test: Hexagon 3 of 4': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=hexagon', - 'TVM_NUM_SHARDS=4', - 'TVM_SHARD_INDEX=2'], { - sh( + } else { + Utils.markStageSkippedForConditional('python: i386 1 of 5') + } +} + +def shard_run_python_i386_2_of_5() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TVM_NUM_SHARDS=5', + 'TVM_SHARD_INDEX=1'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + 
md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 2 of 5') + } +} + +def shard_run_python_i386_3_of_5() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TVM_NUM_SHARDS=5', + 'TVM_SHARD_INDEX=2'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 3 of 5') + } +} + +def shard_run_python_i386_4_of_5() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TVM_NUM_SHARDS=5', + 'TVM_SHARD_INDEX=3'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 4 of 5') + } +} + +def shard_run_python_i386_5_of_5() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=i386', + 'TVM_NUM_SHARDS=5', + 'TVM_SHARD_INDEX=4'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress 
s3://${s3_prefix}/i386/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/i386/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_i386) + python_unittest(ci_i386) + sh ( + script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('python: i386 5 of 5') + } +} + + +def shard_run_test_Hexagon_1_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so @@ -1219,42 +1425,42 @@ stage('Test') { md5sum build/libtvm_runtime.so aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive """, label: 'Download artifacts from S3', ) - ci_setup(ci_hexagon) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + add_hexagon_permissions() + ci_setup(ci_hexagon) + cpp_unittest(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('test: Hexagon 3 of 4') } - }, - 'test: Hexagon 4 of 4': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=hexagon', - 'TVM_NUM_SHARDS=4', - 'TVM_SHARD_INDEX=3'], { - sh( + } else { + Utils.markStageSkippedForConditional('test: Hexagon 1 of 7') + } +} + +def shard_run_test_Hexagon_2_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=1'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so @@ -1263,173 +1469,866 @@ stage('Test') { md5sum build/libtvm_runtime.so aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive """, label: 'Download artifacts from S3', ) - ci_setup(ci_hexagon) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) - }) - } - } finally { - junit 
'build/pytest-results/*.xml' + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('test: Hexagon 4 of 4') } - }, - 'test: QEMU': { - if (!skip_ci && is_docs_only_build != 1) { - node('CPU-SMALL') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-qemu") { + } else { + Utils.markStageSkippedForConditional('test: Hexagon 2 of 7') + } +} + +def shard_run_test_Hexagon_3_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - withEnv(['PLATFORM=qemu'], { - sh( + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=2'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake md5sum build/config.cmake - aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/microtvm_template_projects build/microtvm_template_projects --recursive + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive """, label: 'Download artifacts from S3', ) - add_microtvm_permissions() - ci_setup(ci_qemu) - cpp_unittest(ci_qemu) - sh ( - script: "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh", - label: 'Run microTVM tests', - ) - sh ( - script: "${docker_run} ${ci_qemu} ./tests/scripts/task_demo_microtvm.sh", - label: 'Run microTVM demos', - ) - }) - } finally { - junit 'build/pytest-results/*.xml' - } + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('test: QEMU') } - }, - 'topi: aarch64': { - if (!skip_ci && is_docs_only_build != 1) { - node('ARM') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + } else { + Utils.markStageSkippedForConditional('test: Hexagon 3 of 7') + } +} + +def shard_run_test_Hexagon_4_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - withEnv(['PLATFORM=arm'], { - sh( + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=3'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so - md5sum build/libvta_fsim.so - aws s3 cp --no-progress 
s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive """, label: 'Download artifacts from S3', ) - ci_setup(ci_arm) - cpp_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", - label: 'Run test_arm_compute_lib test', - ) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) - }) - } finally { - junit 'build/pytest-results/*.xml' - } + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 4 of 7') + } +} + +def shard_run_test_Hexagon_5_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=4'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake + md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive + """, + label: 'Download artifacts from S3', + ) + + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 5 of 7') + } +} + +def shard_run_test_Hexagon_6_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=5'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake + md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive + """, + label: 'Download artifacts from S3', + ) + + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + 
} + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 6 of 7') + } +} + +def shard_run_test_Hexagon_7_of_7() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=hexagon', + 'TVM_NUM_SHARDS=7', + 'TVM_SHARD_INDEX=6'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/config.cmake build/config.cmake + md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/hexagon/build/hexagon_api_output build/hexagon_api_output --recursive + """, + label: 'Download artifacts from S3', + ) + + add_hexagon_permissions() + ci_setup(ci_hexagon) + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('test: Hexagon 7 of 7') + } +} + + +def shard_run_integration_aarch64_1_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=0'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 1 of 4') + } +} + +def shard_run_integration_aarch64_2_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=1'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + 
} finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 2 of 4') + } +} + +def shard_run_integration_aarch64_3_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=2'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 3 of 4') + } +} + +def shard_run_integration_aarch64_4_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=3'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('integration: aarch64 4 of 4') + } +} + + +def shard_run_topi_GPU_1_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=0'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) 
+ } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 1 of 4') + } +} + +def shard_run_topi_GPU_2_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=1'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 2 of 4') + } +} + +def shard_run_topi_GPU_3_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=2'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 3 of 4') + } +} + +def shard_run_topi_GPU_4_of_4() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=4', + 'TVM_SHARD_INDEX=3'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('topi: GPU 4 of 
4') + } +} + + +def shard_run_frontend_GPU_1_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=0'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 1 of 6') + } +} + +def shard_run_frontend_GPU_2_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=1'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 2 of 6') + } +} + +def shard_run_frontend_GPU_3_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=2'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 3 of 6') + } +} + +def shard_run_frontend_GPU_4_of_6() { 
+ if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=3'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) + } + } finally { + junit 'build/pytest-results/*.xml' + } + } + } + } else { + Utils.markStageSkippedForConditional('frontend: GPU 4 of 6') + } +} + +def shard_run_frontend_GPU_5_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=4'], { + sh( + script: """ + set -eux + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + md5sum build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('topi: aarch64') } - }, - 'integration: aarch64 1 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('ARM') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=arm', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('frontend: GPU 5 of 6') + } +} + +def shard_run_frontend_GPU_6_of_6() { + if (!skip_ci && is_docs_only_build != 1) { + node('GPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=gpu', + 'TVM_NUM_SHARDS=6', + 'TVM_SHARD_INDEX=5'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress 
s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_arm) - python_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('integration: aarch64 1 of 2') } - }, - 'integration: aarch64 2 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('ARM') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=arm', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('frontend: GPU 6 of 6') + } +} + + +def shard_run_topi_aarch64_1_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so @@ -1444,236 +2343,379 @@ stage('Test') { label: 'Download artifacts from S3', ) - ci_setup(ci_arm) - python_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_arm) + cpp_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('integration: aarch64 2 of 2') } - }, - 'topi: GPU 1 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('topi: aarch64 1 of 2') + } +} + +def shard_run_topi_aarch64_2_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=1'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so md5sum 
build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_arm) + cpp_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('topi: GPU 1 of 2') } - }, - 'topi: GPU 2 of 2': { - if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=2', - 'TVM_SHARD_INDEX=1'], { - sh( + } else { + Utils.markStageSkippedForConditional('topi: aarch64 2 of 2') + } +} + + +def shard_run_frontend_aarch64_1_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=0'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('topi: GPU 2 of 2') } - }, - 'frontend: GPU 1 of 3': { - if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=0'], { - sh( + } else { + Utils.markStageSkippedForConditional('frontend: 
aarch64 1 of 2') + } +} + +def shard_run_frontend_aarch64_2_of_2() { + if (!skip_ci && is_docs_only_build != 1) { + node('ARM') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM=arm', + 'TVM_NUM_SHARDS=2', + 'TVM_SHARD_INDEX=1'], { + sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", - label: 'Run Python frontend tests', - ) - }) - } - } finally { - junit 'build/pytest-results/*.xml' + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('frontend: GPU 1 of 3') } + } else { + Utils.markStageSkippedForConditional('frontend: aarch64 2 of 2') + } +} + + + +def test() { +stage('Test') { + environment { + SKIP_SLOW_TESTS = "${skip_slow_tests}" + } + parallel( + 'unittest: GPU 1 of 3': { + shard_run_unittest_GPU_1_of_3() + }, + 'unittest: GPU 2 of 3': { + shard_run_unittest_GPU_2_of_3() + }, + 'unittest: GPU 3 of 3': { + shard_run_unittest_GPU_3_of_3() + }, + 'integration: CPU 1 of 6': { + shard_run_integration_CPU_1_of_6() }, - 'frontend: GPU 2 of 3': { + 'integration: CPU 2 of 6': { + shard_run_integration_CPU_2_of_6() + }, + 'integration: CPU 3 of 6': { + shard_run_integration_CPU_3_of_6() + }, + 'integration: CPU 4 of 6': { + shard_run_integration_CPU_4_of_6() + }, + 'integration: CPU 5 of 6': { + shard_run_integration_CPU_5_of_6() + }, + 'integration: CPU 6 of 6': { + shard_run_integration_CPU_6_of_6() + }, + 'python: i386 1 of 5': { + shard_run_python_i386_1_of_5() + }, + 'python: i386 2 of 5': { + shard_run_python_i386_2_of_5() + }, + 'python: i386 3 of 5': { + shard_run_python_i386_3_of_5() + }, + 'python: i386 4 of 5': { + shard_run_python_i386_4_of_5() + }, + 'python: i386 5 of 5': { + shard_run_python_i386_5_of_5() + }, + 'test: Hexagon 1 of 7': { + shard_run_test_Hexagon_1_of_7() + }, + 'test: Hexagon 2 of 7': { + shard_run_test_Hexagon_2_of_7() + }, + 'test: Hexagon 3 of 7': { + shard_run_test_Hexagon_3_of_7() + }, + 'test: Hexagon 4 of 7': { + shard_run_test_Hexagon_4_of_7() + }, + 'test: Hexagon 5 of 7': { + shard_run_test_Hexagon_5_of_7() + }, + 'test: Hexagon 6 of 7': { + shard_run_test_Hexagon_6_of_7() + }, + 'test: Hexagon 7 of 7': { + shard_run_test_Hexagon_7_of_7() + }, + 'integration: aarch64 1 of 4': { + shard_run_integration_aarch64_1_of_4() + }, + 'integration: aarch64 2 of 4': { + shard_run_integration_aarch64_2_of_4() + }, + 'integration: 
aarch64 3 of 4': { + shard_run_integration_aarch64_3_of_4() + }, + 'integration: aarch64 4 of 4': { + shard_run_integration_aarch64_4_of_4() + }, + 'topi: GPU 1 of 4': { + shard_run_topi_GPU_1_of_4() + }, + 'topi: GPU 2 of 4': { + shard_run_topi_GPU_2_of_4() + }, + 'topi: GPU 3 of 4': { + shard_run_topi_GPU_3_of_4() + }, + 'topi: GPU 4 of 4': { + shard_run_topi_GPU_4_of_4() + }, + 'frontend: GPU 1 of 6': { + shard_run_frontend_GPU_1_of_6() + }, + 'frontend: GPU 2 of 6': { + shard_run_frontend_GPU_2_of_6() + }, + 'frontend: GPU 3 of 6': { + shard_run_frontend_GPU_3_of_6() + }, + 'frontend: GPU 4 of 6': { + shard_run_frontend_GPU_4_of_6() + }, + 'frontend: GPU 5 of 6': { + shard_run_frontend_GPU_5_of_6() + }, + 'frontend: GPU 6 of 6': { + shard_run_frontend_GPU_6_of_6() + }, + 'topi: aarch64 1 of 2': { + shard_run_topi_aarch64_1_of_2() + }, + 'topi: aarch64 2 of 2': { + shard_run_topi_aarch64_2_of_2() + }, + 'frontend: aarch64 1 of 2': { + shard_run_frontend_aarch64_1_of_2() + }, + 'frontend: aarch64 2 of 2': { + shard_run_frontend_aarch64_2_of_2() + }, + 'unittest: CPU': { if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=1'], { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") { + timeout(time: max_time, unit: 'MINUTES') { + try { + init_git() + withEnv(['PLATFORM=cpu'], { sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_tsim.so build/libvta_tsim.so + md5sum build/libvta_tsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libvta_fsim.so build/libvta_fsim.so md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/cpu/build/config.cmake build/config.cmake md5sum build/config.cmake """, label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) + ci_setup(ci_cpu) + cpp_unittest(ci_cpu) + python_unittest(ci_cpu) + fsim_test(ci_cpu) sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", - label: 'Run Python frontend tests', + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh", + label: 'Run VTA tests in TSIM', ) }) + } finally { + junit 'build/pytest-results/*.xml' } - } finally { - junit 'build/pytest-results/*.xml' } } } } else { - Utils.markStageSkippedForConditional('frontend: GPU 2 of 3') + Utils.markStageSkippedForConditional('unittest: CPU') } }, - 'frontend: GPU 3 of 3': { + 'test: QEMU': { if (!skip_ci && is_docs_only_build != 1) { - node('GPU') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM=gpu', - 'TVM_NUM_SHARDS=3', - 'TVM_SHARD_INDEX=2'], { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-qemu") { + 
timeout(time: max_time, unit: 'MINUTES') { + try { + init_git() + withEnv(['PLATFORM=qemu'], { sh( script: """ set -eux - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm.so build/libtvm.so + aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/libtvm.so build/libtvm.so md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libvta_fsim.so build/libvta_fsim.so - md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/libtvm_runtime.so build/libtvm_runtime.so + aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/libtvm_runtime.so build/libtvm_runtime.so md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/gpu/build/config.cmake build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/config.cmake build/config.cmake md5sum build/config.cmake + aws s3 cp --no-progress s3://${s3_prefix}/qemu/build/microtvm_template_projects build/microtvm_template_projects --recursive """, label: 'Download artifacts from S3', ) - ci_setup(ci_gpu) + add_microtvm_permissions() + ci_setup(ci_qemu) + cpp_unittest(ci_qemu) sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", - label: 'Run Python frontend tests', + script: "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh", + label: 'Run microTVM tests', + ) + sh ( + script: "${docker_run} ${ci_qemu} ./tests/scripts/task_demo_microtvm.sh", + label: 'Run microTVM demos', ) }) + } finally { + junit 'build/pytest-results/*.xml' } - } finally { - junit 'build/pytest-results/*.xml' } } } } else { - Utils.markStageSkippedForConditional('frontend: GPU 3 of 3') + Utils.markStageSkippedForConditional('test: QEMU') } }, 'frontend: CPU': { if (!skip_ci && is_docs_only_build != 1) { - node('CPU') { + node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-cpu") { timeout(time: max_time, unit: 'MINUTES') { try { @@ -1710,45 +2752,6 @@ stage('Test') { Utils.markStageSkippedForConditional('frontend: CPU') } }, - 'frontend: aarch64': { - if (!skip_ci && is_docs_only_build != 1) { - node('ARM') { - ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-arm") { - timeout(time: max_time, unit: 'MINUTES') { - try { - init_git() - withEnv(['PLATFORM=arm'], { - sh( - script: """ - set -eux - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm.so build/libtvm.so - md5sum build/libtvm.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libvta_fsim.so build/libvta_fsim.so - md5sum build/libvta_fsim.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/libtvm_runtime.so build/libtvm_runtime.so - md5sum build/libtvm_runtime.so - aws s3 cp --no-progress s3://${s3_prefix}/arm/build/config.cmake build/config.cmake - md5sum build/config.cmake - """, - label: 'Download artifacts from S3', - ) - - ci_setup(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", - label: 'Run Python frontend tests', - ) - }) - } finally { - junit 'build/pytest-results/*.xml' - } - } - } - } - } else { - Utils.markStageSkippedForConditional('frontend: aarch64') - } - }, 'docs: GPU': { if (!skip_ci) { node('GPU') { diff --git a/jenkins/Build.groovy.j2 b/jenkins/Build.groovy.j2 index c1715949175b..a0ccfde4729e 100644 --- a/jenkins/Build.groovy.j2 +++ b/jenkins/Build.groovy.j2 @@ -43,6 +43,15 @@ def add_microtvm_permissions() { {% endfor %} } +def add_hexagon_permissions() { + {% for folder in hexagon_api %} + sh( + script: 'find {{ folder }} -type f | xargs chmod +x', + label: 'Add execute 
permissions for hexagon files', + ) + {% endfor %} +} + def build() { stage('Build') { environment { @@ -174,7 +183,11 @@ stage('Build') { label: 'Create Hexagon cmake config', ) make(ci_hexagon, 'build', '-j2') - {{ m.upload_artifacts(tag='hexagon', filenames=tvm_lib) }} + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", + label: 'Build Hexagon API', + ) + {{ m.upload_artifacts(tag='hexagon', filenames=tvm_lib, folders=hexagon_api) }} } } } else { diff --git a/jenkins/Jenkinsfile.j2 b/jenkins/Jenkinsfile.j2 index a1127ec6a8d5..c165de964feb 100644 --- a/jenkins/Jenkinsfile.j2 +++ b/jenkins/Jenkinsfile.j2 @@ -97,6 +97,7 @@ if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { {% set tvm_multilib = ['build/libtvm.so', 'build/libvta_fsim.so'] + tvm_runtime %} {% set tvm_multilib_tsim = ['build/libvta_tsim.so'] + tvm_multilib %} {% set microtvm_template_projects = ['build/microtvm_template_projects',] %} +{% set hexagon_api = ['build/hexagon_api_output',] %} s3_prefix = "tvm-jenkins-artifacts-prod/tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}" // General note: Jenkins has limits on the size of a method (or top level code) diff --git a/jenkins/Test.groovy.j2 b/jenkins/Test.groovy.j2 index b287c2a3156e..7339625b69ff 100644 --- a/jenkins/Test.groovy.j2 +++ b/jenkins/Test.groovy.j2 @@ -1,56 +1,190 @@ +{% set test_method_names = [] %} + +// We have to do this whacky split of the code from where it's used since the +// JVM limits method length to 64k and we easily exceed that with all this +// autogenerated code. This makes it so each test step is in its own method so +// that each individual method isn't too big. +{% call(shard_index, num_shards) m.sharded_test_step( + name="unittest: GPU", + num_shards=3, + node="GPU", + ws="tvm/ut-python-gpu", + platform="gpu", + test_method_names=test_method_names, +) %} + {% if shard_index == 1 %} + {{ m.download_artifacts(tag='gpu2', filenames=tvm_multilib) }} + cpp_unittest(ci_gpu) + + {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} + ci_setup(ci_gpu) + cpp_unittest(ci_gpu) + {% else %} + {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} + ci_setup(ci_gpu) + {% endif %} + {% if shard_index == 2 or num_shards < 2 %} + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", + label: 'Run Java unit tests', + ) + {% endif %} + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", + label: 'Run Python GPU unit tests', + ) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", + label: 'Run Python GPU integration tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="integration: CPU", + node="CPU-SMALL", + num_shards=6, + ws="tvm/integration-python-cpu", + platform="cpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='cpu', filenames=tvm_multilib_tsim) }} + ci_setup(ci_cpu) + sh ( + script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="python: i386", + node="CPU-SMALL", + num_shards=5, + ws="tvm/integration-python-i386", + platform="i386", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='i386', filenames=tvm_multilib) }} + ci_setup(ci_i386) + {% if shard_index == 1 %} + cpp_unittest(ci_i386) + {% endif %} + python_unittest(ci_i386) + sh ( + script: 
"${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", + label: 'Run i386 integration tests', + ) + fsim_test(ci_i386) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="test: Hexagon", + node="CPU-SMALL", + ws="tvm/test-hexagon", + platform="hexagon", + test_method_names=test_method_names, + num_shards=7, +) %} + {{ m.download_artifacts(tag='hexagon', filenames=tvm_lib, folders=hexagon_api) }} + add_hexagon_permissions() + ci_setup(ci_hexagon) + {% if shard_index == 1 %} + cpp_unittest(ci_hexagon) + {% endif %} + sh ( + script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", + label: 'Run Hexagon tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="integration: aarch64", + num_shards=4, + node="ARM", ws="tvm/ut-python-arm", + platform="arm", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} + ci_setup(ci_arm) + python_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", + label: 'Run CPU integration tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="topi: GPU", + node="GPU", + num_shards=4, + ws="tvm/topi-python-gpu", + platform="gpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="frontend: GPU", + node="GPU", + num_shards=6, + ws="tvm/frontend-python-gpu", + platform="gpu", + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} + ci_setup(ci_gpu) + sh ( + script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", + label: 'Run Python frontend tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="topi: aarch64", + node="ARM", + ws="tvm/ut-python-arm", + platform="arm", + num_shards=2, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} + ci_setup(ci_arm) + cpp_unittest(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", + label: 'Run test_arm_compute_lib test', + ) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", + label: 'Run TOPI tests', + ) +{% endcall %} +{% call(shard_index, num_shards) m.sharded_test_step( + name="frontend: aarch64", + node="ARM", + ws="tvm/frontend-python-arm", + platform="arm", + num_shards=2, + test_method_names=test_method_names, +) %} + {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} + ci_setup(ci_arm) + sh ( + script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", + label: 'Run Python frontend tests', + ) +{% endcall %} + + def test() { stage('Test') { environment { SKIP_SLOW_TESTS = "${skip_slow_tests}" } parallel( - {% call(shard_index, num_shards) m.sharded_test_step( - name="unittest: GPU", - num_shards=2, - node="GPU", - ws="tvm/ut-python-gpu", - platform="gpu", - ) %} - {% if shard_index == 1 %} - {{ m.download_artifacts(tag='gpu2', filenames=tvm_multilib) }} - cpp_unittest(ci_gpu) - - {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} - ci_setup(ci_gpu) - cpp_unittest(ci_gpu) - {% else %} - {{ m.download_artifacts(tag='gpu', 
filenames=tvm_multilib) }} - ci_setup(ci_gpu) - {% endif %} - {% if shard_index == 2 or num_shards < 2 %} - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_java_unittest.sh", - label: 'Run Java unit tests', - ) - {% endif %} - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_unittest_gpuonly.sh", - label: 'Run Python GPU unit tests', - ) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_integration_gpuonly.sh", - label: 'Run Python GPU integration tests', - ) - {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="integration: CPU", - node="CPU", - num_shards=2, - ws="tvm/integration-python-cpu", - platform="cpu", - ) %} - {{ m.download_artifacts(tag='cpu', filenames=tvm_multilib_tsim) }} - ci_setup(ci_cpu) - sh ( - script: "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - {% endcall %} + {% for stage_name, method_name in test_method_names %} + '{{ stage_name }}': { + {{ method_name }}() + }, + {% endfor %} {% call m.test_step( name="unittest: CPU", node="CPU-SMALL", @@ -67,46 +201,6 @@ stage('Test') { label: 'Run VTA tests in TSIM', ) {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="python: i386", - node="CPU-SMALL", - num_shards=3, - ws="tvm/integration-python-i386", - platform="i386", - ) %} - {{ m.download_artifacts(tag='i386', filenames=tvm_multilib) }} - ci_setup(ci_i386) - {% if shard_index == 1 %} - cpp_unittest(ci_i386) - {% endif %} - python_unittest(ci_i386) - sh ( - script: "${docker_run} ${ci_i386} ./tests/scripts/task_python_integration_i386only.sh", - label: 'Run i386 integration tests', - ) - fsim_test(ci_i386) - {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="test: Hexagon", - node="CPU-SMALL", - ws="tvm/test-hexagon", - platform="hexagon", - num_shards=4, - ) %} - {{ m.download_artifacts(tag='hexagon', filenames=tvm_lib) }} - ci_setup(ci_hexagon) - {% if shard_index == 1 %} - cpp_unittest(ci_hexagon) - {% endif %} - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_build_hexagon_api.sh", - label: 'Build Hexagon API', - ) - sh ( - script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_python_hexagon.sh", - label: 'Run Hexagon tests', - ) - {% endcall %} {% call m.test_step( name="test: QEMU", node="CPU-SMALL", @@ -126,68 +220,9 @@ stage('Test') { label: 'Run microTVM demos', ) {% endcall %} - {% call m.test_step( - name="topi: aarch64", - node="ARM", - ws="tvm/ut-python-arm", - platform="arm", -) %} - {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} - ci_setup(ci_arm) - cpp_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_arm_compute_library.sh", - label: 'Run test_arm_compute_lib test', - ) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) - {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="integration: aarch64", - num_shards=2, - node="ARM", ws="tvm/ut-python-arm", - platform="arm", - ) %} - {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} - ci_setup(ci_arm) - python_unittest(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_integration.sh", - label: 'Run CPU integration tests', - ) - {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="topi: GPU", - node="GPU", - num_shards=2, - ws="tvm/topi-python-gpu", - platform="gpu", - ) %} - {{ 
m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_topi.sh", - label: 'Run TOPI tests', - ) - {% endcall %} - {% call(shard_index, num_shards) m.sharded_test_step( - name="frontend: GPU", node="GPU", - num_shards=3, - ws="tvm/frontend-python-gpu", - platform="gpu", - ) %} - {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} - ci_setup(ci_gpu) - sh ( - script: "${docker_run} ${ci_gpu} ./tests/scripts/task_python_frontend.sh", - label: 'Run Python frontend tests', - ) - {% endcall %} {% call m.test_step( name="frontend: CPU", - node="CPU", + node="CPU-SMALL", ws="tvm/frontend-python-cpu", platform="cpu", ) %} @@ -198,19 +233,6 @@ stage('Test') { label: 'Run Python frontend tests', ) {% endcall %} - {% call m.test_step( - name="frontend: aarch64", - node="ARM", - ws="tvm/frontend-python-arm", - platform="arm", -) %} - {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} - ci_setup(ci_arm) - sh ( - script: "${docker_run} ${ci_arm} ./tests/scripts/task_python_frontend_cpu.sh", - label: 'Run Python frontend tests', - ) - {% endcall %} 'docs: GPU': { if (!skip_ci) { node('GPU') { diff --git a/jenkins/macros.j2 b/jenkins/macros.j2 index ce29aa2d580d..1c649e31fabf 100644 --- a/jenkins/macros.j2 +++ b/jenkins/macros.j2 @@ -19,31 +19,35 @@ "workspace/exec_${env.EXECUTOR_NUMBER}/{{ folder }}" {%- endmacro -%} -{% macro sharded_test_step(name, num_shards, node, ws, platform) %} +{% macro sharded_test_step(name, num_shards, node, ws, platform, test_method_names) %} + {% for shard_index in range(1, num_shards + 1) %} - '{{ name }} {{ shard_index }} of {{ num_shards }}': { - if (!skip_ci && is_docs_only_build != 1) { - node('{{ node }}') { - ws({{ per_exec_ws(ws) }}) { - try { - init_git() - timeout(time: max_time, unit: 'MINUTES') { - withEnv([ - 'PLATFORM={{ platform }}', - 'TVM_NUM_SHARDS={{ num_shards }}', - 'TVM_SHARD_INDEX={{ shard_index - 1 }}'], { - {{ caller(shard_index, num_shards) | trim | indent(width=12) }} - }) - } - } finally { - junit 'build/pytest-results/*.xml' +{% set method_name = "shard_run_" + name.replace(":", "").replace(" ", "-").replace("-", "_") + "_" + shard_index|string + "_of_" + num_shards|string %} +def {{ method_name }}() { + if (!skip_ci && is_docs_only_build != 1) { + node('{{ node }}') { + ws({{ per_exec_ws(ws) }}) { + try { + init_git() + timeout(time: max_time, unit: 'MINUTES') { + withEnv([ + 'PLATFORM={{ platform }}', + 'TVM_NUM_SHARDS={{ num_shards }}', + 'TVM_SHARD_INDEX={{ shard_index - 1 }}'], { + {{ caller(shard_index, num_shards) | trim | indent(width=12) }} + }) } + } finally { + junit 'build/pytest-results/*.xml' } } - } else { - Utils.markStageSkippedForConditional('{{ name }} {{ shard_index }} of {{ num_shards }}') } - }, + } else { + Utils.markStageSkippedForConditional('{{ name }} {{ shard_index }} of {{ num_shards }}') + } +} +{% set _ = test_method_names.append((name + " " + shard_index|string + " of " + num_shards|string, method_name)) %} + {% endfor %} {% endmacro %}
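For reviewers unfamiliar with the template change above: the reworked sharded_test_step macro now emits one top-level Groovy method per shard (keeping each generated method under the JVM's 64k method-size limit) and appends a (stage name, method name) pair to test_method_names, which Test.groovy.j2 later loops over to wire the stages into parallel(). The sketch below is illustrative only, assuming the "unittest: GPU" stage with three shards; the method name is derived from the stage name by dropping ":" and mapping spaces and hyphens to underscores, as in the generated Jenkinsfile earlier in this patch.

    // Illustrative sketch only; the real body is filled in from the macro caller.
    def shard_run_unittest_GPU_1_of_3() {
      if (!skip_ci && is_docs_only_build != 1) {
        node('GPU') {
          // download artifacts from S3, ci_setup(ci_gpu), run this shard's tests ...
        }
      } else {
        Utils.markStageSkippedForConditional('unittest: GPU 1 of 3')
      }
    }

    // test() then only maps the human-readable stage label to the generated method:
    parallel(
      'unittest: GPU 1 of 3': {
        shard_run_unittest_GPU_1_of_3()
      },
      // ... remaining shards and stages ...
    )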