diff --git a/Jenkinsfile b/Jenkinsfile
index fa1629205080..f78933a10cf1 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -149,6 +149,9 @@ stage('Prepare') {
 }
 
 stage("Sanity Check") {
+  environment {
+    CI = 'true'
+  }
   timeout(time: max_time, unit: 'MINUTES') {
     node('CPU') {
       ws(per_exec_ws("tvm/sanity")) {
@@ -162,7 +165,7 @@ stage("Sanity Check") {
 // Run make. First try to do an incremental make from a previous workspace in hope to
 // accelerate the compilation. If something goes wrong, clean the workspace and then
 // build from scratch.
-def make(docker_type, path, make_flag) {
+def make(docker_type, path, make_flag='') {
   timeout(time: max_time, unit: 'MINUTES') {
     try {
       sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}"
@@ -201,16 +204,19 @@ def unpack_lib(name, libs) {
 }
 
 stage('Build') {
+  environment {
+    CI = 'true'
+  }
   parallel 'BUILD: GPU': {
     node('GPUBUILD') {
       ws(per_exec_ws("tvm/build-gpu")) {
         init_git()
         sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
-        make(ci_gpu, 'build', '-j2')
+        make(ci_gpu, 'build')
         pack_lib('gpu', tvm_multilib)
         // compiler test
         sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
-        make(ci_gpu, 'build2', '-j2')
+        make(ci_gpu, 'build2')
       }
     }
   },
@@ -219,7 +225,7 @@ stage('Build') {
       ws(per_exec_ws("tvm/build-cpu")) {
         init_git()
         sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
-        make(ci_cpu, 'build', '-j2')
+        make(ci_cpu, 'build')
         pack_lib('cpu', tvm_multilib_tsim)
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
@@ -240,7 +246,7 @@ stage('Build') {
       ws(per_exec_ws("tvm/build-wasm")) {
         init_git()
         sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh"
-        make(ci_wasm, 'build', '-j2')
+        make(ci_wasm, 'build')
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_wasm} ./tests/scripts/task_web_wasm.sh"
@@ -253,7 +259,7 @@ stage('Build') {
       ws(per_exec_ws("tvm/build-i386")) {
         init_git()
         sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
-        make(ci_i386, 'build', '-j2')
+        make(ci_i386, 'build')
         pack_lib('i386', tvm_multilib_tsim)
       }
     }
@@ -263,7 +269,7 @@ stage('Build') {
       ws(per_exec_ws("tvm/build-arm")) {
         init_git()
         sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh"
-        make(ci_arm, 'build', '-j4')
+        make(ci_arm, 'build')
         pack_lib('arm', tvm_multilib)
       }
     }
@@ -273,7 +279,7 @@ stage('Build') {
      ws(per_exec_ws("tvm/build-qemu")) {
         init_git()
         sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh"
-        make(ci_qemu, 'build', '-j2')
+        make(ci_qemu, 'build')
         timeout(time: max_time, unit: 'MINUTES') {
           sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh"
           sh "${docker_run} ${ci_qemu} ./tests/scripts/task_python_microtvm.sh"
@@ -285,6 +291,9 @@ stage('Build') {
 }
 
 stage('Unit Test') {
+  environment {
+    CI = 'true'
+  }
   parallel 'python3: GPU': {
     node('TensorCore') {
       ws(per_exec_ws("tvm/ut-python-gpu")) {
@@ -345,6 +354,9 @@ stage('Unit Test') {
 }
 
 stage('Integration Test') {
+  environment {
+    CI = 'true'
+  }
   parallel 'topi: GPU': {
     node('GPU') {
       ws(per_exec_ws("tvm/topi-python-gpu")) {
@@ -401,6 +413,9 @@ stage('Integration Test') {
 
 /*
 stage('Build packages') {
+  environment {
+    CI = 'true'
+  }
   parallel 'conda CPU': {
     node('CPU') {
       sh "${docker_run} tlcpack/conda-cpu ./conda/build_cpu.sh
@@ -418,6 +433,9 @@ stage('Build packages') {
 */
 
 stage('Deploy') {
+  environment {
+    CI = 'true'
+  }
   node('doc') {
     ws(per_exec_ws("tvm/deploy-docs")) {
       if (env.BRANCH_NAME == "main") {
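Two review notes on the Jenkinsfile hunks above. First, environment { ... } is a Declarative Pipeline directive; this Jenkinsfile is written in the Scripted Pipeline style (node/ws closures), where the usual way to set a variable for a stage body is withEnv(['CI=true']) { ... }, so it is worth verifying that these blocks actually take effect at runtime. Second, dropping the explicit -j2/-j4 flags delegates the parallelism choice to tests/scripts/task_build.sh (changed further down), which falls back to CI_CPUSET_NUM_CPUS when no flag is passed. A minimal sketch of the resulting call chain, assuming a container in which docker/bash.sh has exported a four-CPU cpuset:

    # Hypothetical session inside a CI container where docker/bash.sh (below)
    # has exported CI_CPUSET_NUM_CPUS=4.
    export CI_CPUSET_NUM_CPUS=4

    # The Jenkinsfile now calls make(ci_cpu, 'build') with no make_flag, so
    # task_build.sh receives one argument and defaults to -j${CI_CPUSET_NUM_CPUS}:
    ./tests/scripts/task_build.sh build       # effectively runs: make -j4

    # An explicit flag still wins, preserving the old behavior:
    ./tests/scripts/task_build.sh build -j2   # effectively runs: make -j2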
diff --git a/docker/bash.sh b/docker/bash.sh
index 372cfded8f89..9404f99295de 100755
--- a/docker/bash.sh
+++ b/docker/bash.sh
@@ -33,16 +33,24 @@
 # With -i, execute interactively.
 #
 
-set -euo pipefail
+set -xeuo pipefail
 
 function show_usage() {
     cat <<EOF
-Usage: docker/bash.sh [-i|--interactive] [--net=host] [-t|--tty]
+Usage: docker/bash.sh [-i|--interactive] [--net=host] [-t|--tty] [--cpuset-cpus=<CPUS>]
              [--mount MOUNT_DIR] [--repo-mount-point REPO_MOUNT_POINT]
              [--dry-run] [--] [COMMAND]
 
+--cpuset-cpus=<CPUS>
+
+    Restrict docker container to use specific CPUs. See
+    docker run --help for further documentation of this parameter.
+    When launched from the CI (the "CI" environment variable is set),
+    this parameter is inferred from the "NODE_NAME" and "EXECUTOR_NUMBER"
+    environment variables.
+
 -h, --help
 
     Display this help message.
@@ -55,13 +63,6 @@ Usage: docker/bash.sh [-i|--interactive] [--net=host] [-t|--tty]
 
     Start the docker session with a pseudo terminal (tty).
 
---net=host
-
-    Expose servers run into the container to the host, passing the
-    "--net=host" argument through to docker. On MacOS, this is
-    instead passed as "-p 8888:8888" since the host networking driver
-    isn't supported.
-
 --mount MOUNT_DIR
 
     Expose MOUNT_DIR as an additional mount point inside the docker
@@ -69,6 +70,13 @@ Usage: docker/bash.sh [-i|--interactive] [--net=host] [-t|--tty]
     the folder location outside the container. This option can be
     specified multiple times.
 
+--net=host
+
+    Expose servers run into the container to the host, passing the
+    "--net=host" argument through to docker. On MacOS, this is
+    instead passed as "-p 8888:8888" since the host networking driver
+    isn't supported.
+
 --repo-mount-point REPO_MOUNT_POINT
 
     The directory inside the docker container at which the TVM
@@ -111,6 +119,7 @@ EOF
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)"
 REPO_DIR="$(dirname "${SCRIPT_DIR}")"
 
+CPUSET_CPUS=
 DRY_RUN=false
 INTERACTIVE=false
 TTY=false
@@ -146,6 +155,16 @@ break_joined_flag='if (( ${#1} == 2 )); then shift; else set -- -"${1#-i}" "${@:2}"; fi'
 
 while (( $# )); do
     case "$1" in
+        --cpuset-cpus=?*)
+            CPUSET_CPUS="${1#*=}"
+            shift
+            ;;
+
+        --dry-run)
+            DRY_RUN=true
+            shift
+            ;;
+
         -h|--help)
             show_usage
             exit 0
@@ -180,8 +199,8 @@ while (( $# )); do
             shift
             ;;
 
-        --dry-run)
-            DRY_RUN=true
+        --net=host)
+            USE_NET_HOST=true
             shift
             ;;
@@ -276,6 +295,46 @@ DOCKER_ENV+=( --env CI_BUILD_HOME="${REPO_MOUNT_POINT}"
               --env CI_IMAGE_NAME="${DOCKER_IMAGE_NAME}"
 )
 
+# Choose CPUs on which this container will execute.
+if [ -n "${CI+x}" -a -z "${CPUSET_CPUS}" ]; then
+    if [ -n "${CI_NUM_EXECUTORS-}" ]; then
+        if [ -n "${CI_CPUSET_LOWER_BOUND-}" -a -n "${CI_CPUSET_UPPER_BOUND-}" ]; then
+            TOTAL_CPUS=$(expr "${CI_CPUSET_UPPER_BOUND}" - "${CI_CPUSET_LOWER_BOUND}" + 1) || /bin/true
+            if [ "${TOTAL_CPUS}" -lt 1 ]; then
+                echo "ERROR: computed TOTAL_CPUS=${TOTAL_CPUS} based on CI_CPUSET_{UPPER,LOWER}_BOUND!"
+                exit 2
+            fi
+        else
+            TOTAL_CPUS=$(nproc)
+            CI_CPUSET_LOWER_BOUND=0
+        fi
+        CPUS_PER_EXECUTOR=$(expr "${TOTAL_CPUS}" / "${CI_NUM_EXECUTORS}")
+        # NOTE: Expr exit status varies by the computed value (good and bad!).
+        CPUSET_CPUS_LOWER_BOUND=$(expr "${CI_CPUSET_LOWER_BOUND}" + \( "${CPUS_PER_EXECUTOR}" '*' "${EXECUTOR_NUMBER}" \) ) || /bin/true
+        CPUSET_CPUS_UPPER_BOUND=$(expr "${CPUSET_CPUS_LOWER_BOUND}" + "${CPUS_PER_EXECUTOR}" - 1) || /bin/true
+        CPUSET_CPUS="${CPUSET_CPUS_LOWER_BOUND}-${CPUSET_CPUS_UPPER_BOUND}"
+        echo "COMPUTE TOTAL_CPUS=${TOTAL_CPUS} CPUS_PER_EXECUTOR=${CPUS_PER_EXECUTOR} CPUSET_CPUS_LOWER_BOUND=${CPUSET_CPUS_LOWER_BOUND} CPUSET_CPUS_UPPER_BOUND=${CPUSET_CPUS_UPPER_BOUND}"
+    else
+        echo "WARNING: CI_NUM_EXECUTORS environment variable not set."
+        echo "No CPU parallelism will be used in this CI build, so it may be quite slow."
+    fi
+fi
+
+if [ -n "${CPUSET_CPUS}" ]; then
+    if [ -z "$(echo ${CPUSET_CPUS} | sed -E '/^[0-9]+-[0-9]+$/ p; /.*/ d')" ]; then
+        echo "error: --cpuset-cpus: must specify in the form <LOWER>-<UPPER>; got ${CPUSET_CPUS}"
+        exit 2
+    fi
+    CPUSET_CPUS_LOWER_BOUND=$(echo "${CPUSET_CPUS}" | sed -E 's/^([0-9]+)-.*$/\1/g')
+    CPUSET_CPUS_UPPER_BOUND=$(echo "${CPUSET_CPUS}" | sed -E 's/^.*-([0-9]+)$/\1/g')
+    CPUSET_NUM_CPUS=$(expr "${CPUSET_CPUS_UPPER_BOUND}" - "${CPUSET_CPUS_LOWER_BOUND}" + 1) || /bin/true
+    DOCKER_FLAGS+=(
+        "--cpuset-cpus=${CPUSET_CPUS}"
+        "--env" "CI_CPUSET_CPUS=${CPUSET_CPUS}"
+        "--env" "CI_CPUSET_NUM_CPUS=${CPUSET_NUM_CPUS}"
+    )
+    echo "USING CPUSET_CPUS ${CPUSET_CPUS}"
+fi
 
 # Pass tvm test data folder through to the docker container, to avoid
 # repeated downloads. Check if we have permissions to write to the
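One inconsistency worth flagging in the help text above: it says the cpuset is inferred from NODE_NAME and EXECUTOR_NUMBER, but the implementation reads CI_NUM_EXECUTORS, CI_CPUSET_LOWER_BOUND/CI_CPUSET_UPPER_BOUND, and EXECUTOR_NUMBER; one of the two should be updated to match. Below is a standalone sketch of the executor-to-cpuset arithmetic with hypothetical values (a 16-CPU agent running four Jenkins executors); shell arithmetic is used instead of expr, which also sidesteps the exit-status caveat in the NOTE above:

    # Hypothetical agent: CPUs 0-15 split across 4 executors; Jenkins sets
    # EXECUTOR_NUMBER for each executor slot.
    CI_CPUSET_LOWER_BOUND=0
    CI_CPUSET_UPPER_BOUND=15
    CI_NUM_EXECUTORS=4
    EXECUTOR_NUMBER=2

    TOTAL_CPUS=$(( CI_CPUSET_UPPER_BOUND - CI_CPUSET_LOWER_BOUND + 1 ))       # 16
    CPUS_PER_EXECUTOR=$(( TOTAL_CPUS / CI_NUM_EXECUTORS ))                    # 4
    LOWER=$(( CI_CPUSET_LOWER_BOUND + CPUS_PER_EXECUTOR * EXECUTOR_NUMBER ))  # 8
    UPPER=$(( LOWER + CPUS_PER_EXECUTOR - 1 ))                                # 11
    echo "--cpuset-cpus=${LOWER}-${UPPER}"    # executor 2 is pinned to CPUs 8-11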
diff --git a/python/tvm/contrib/nvcc.py b/python/tvm/contrib/nvcc.py
index 612be292e873..4e8fc7c2176a 100644
--- a/python/tvm/contrib/nvcc.py
+++ b/python/tvm/contrib/nvcc.py
@@ -97,7 +97,10 @@ def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
     # if cxx_compiler_path != "":
     #    cmd += ["-ccbin", cxx_compiler_path]
 
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    # NOTE(areusch): Per https://github.com/lpereira/lwan/issues/106, stdin must be left open.
+    proc = subprocess.Popen(
+        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
 
     (out, _) = proc.communicate()
diff --git a/python/tvm/testing/plugin.py b/python/tvm/testing/plugin.py
index 95875acbd82c..fc9bad05cad9 100644
--- a/python/tvm/testing/plugin.py
+++ b/python/tvm/testing/plugin.py
@@ -36,6 +36,7 @@
 
 import tvm
 from tvm.testing import utils
+from xdist.scheduler.loadscope import LoadScopeScheduling
 
 
 MARKERS = {
@@ -288,3 +289,16 @@ def _parametrize_correlated_parameters(metafunc):
     names = ",".join(name for name, values in params)
     value_sets = zip(*[values for name, values in params])
     metafunc.parametrize(names, value_sets, indirect=True, ids=ids)
+
+
+class TvmTestScheduler(LoadScopeScheduling):
+    def _split_scope(self, nodeid):
+        # NOTE: test_tvm_testing_features contains parametrization-related tests, and must be
+        # serialized on a single host.
+        if "test_tvm_testing_features" in nodeid:
+            return "functional-tests"
+        return nodeid
+
+
+def pytest_xdist_make_scheduler(config, log):
+    return TvmTestScheduler(config, log)
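Two notes on the plugin change above. The top-level xdist import makes pytest-xdist a hard requirement for importing tvm.testing.plugin even in serial runs; guarding the import (or deferring it into pytest_xdist_make_scheduler) may be worth considering. Behaviorally, LoadScopeScheduling assigns each scope to exactly one worker; since _split_scope returns the full nodeid for everything else, ordinary tests load-balance test-by-test while test_tvm_testing_features is serialized onto a single worker. A quick local check (hypothetical invocation; requires pytest-xdist installed):

    # Every test in this file shares the "functional-tests" scope, so even with
    # four workers the [gwN] prefixes in the verbose output should all match:
    TVM_FFI=ctypes python3 -m pytest -n 4 -v tests/python/unittest/test_tvm_testing_features.py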
+ if "test_tvm_testing_features" in nodeid: + return "functional-tests" + return nodeid + + +def pytest_xdist_make_scheduler(config, log): + return TvmTestScheduler(config, log) diff --git a/tests/python/unittest/test_auto_scheduler_search_policy.py b/tests/python/unittest/test_auto_scheduler_search_policy.py index a9f6596a8548..84480c9526de 100644 --- a/tests/python/unittest/test_auto_scheduler_search_policy.py +++ b/tests/python/unittest/test_auto_scheduler_search_policy.py @@ -210,11 +210,4 @@ def apply_func(search_policy, state, stage_id): if __name__ == "__main__": - test_workload_registry_empty_policy() - test_sketch_search_policy_basic() - test_sketch_search_policy_basic_spawn() - test_sketch_search_policy_xgbmodel() - test_sketch_search_policy_cuda_rpc_runner() - test_sketch_search_policy_cuda_xgbmodel_rpc_runner() - test_sketch_search_policy_zero_rank() - test_sketch_search_policy_custom_sketch() + sys.exit(pytest.main([__file__] + sys.argv[1:])) diff --git a/tests/scripts/setup-pytest-env.sh b/tests/scripts/setup-pytest-env.sh index bcd27a16f659..49f999d8eaf0 100755 --- a/tests/scripts/setup-pytest-env.sh +++ b/tests/scripts/setup-pytest-env.sh @@ -32,7 +32,35 @@ export PYTHONPATH="${TVM_PATH}/python" export TVM_PYTEST_RESULT_DIR="${TVM_PATH}/build/pytest-results" mkdir -p "${TVM_PYTEST_RESULT_DIR}" +if [ -n "${CI_CPUSET_NUM_CPUS-}" ]; then + # When the # of CPUs has been restricted (e.g. when --cpuset-cpus has been passed to docker by + # docker/bash.sh), explicitly use all available CPUs. This environment variable is set by + # docker/bash.sh when it sets --cpuset-cpus. + PYTEST_NUM_CPUS="${CI_CPUSET_NUM_CPUS}" +else + # Else attempt to use $(nproc) - 1. + PYTEST_NUM_CPUS=$(nproc) + if [ -z "${PYTEST_NUM_CPUS}" ]; then + echo "WARNING: nproc failed; running pytest with only 1 CPU" + PYTEST_NUM_CPUS=1 + elif [ ${PYTEST_NUM_CPUS} -gt 1 ]; then + PYTEST_NUM_CPUS=$(expr ${PYTEST_NUM_CPUS} - 1) # Don't nuke interactive work. + fi + + # Don't use >4 CPUs--in general, we only use 4 CPUs in testing, so we want to retain this + # maximum for the purposes of reproducing the CI. You can still override this by setting + # --cpuset-cpus in docker/bash.sh. + if [ ${PYTEST_NUM_CPUS} -gt 4 ]; then + PYTEST_NUM_CPUS=4 + fi +fi + function run_pytest() { + local extra_args=( ) + if [ "$1" == "--parallel" ]; then + extra_args=( -n "${PYTEST_NUM_CPUS}" ) + shift + fi local ffi_type="$1" shift local test_suite_name="$1" @@ -43,8 +71,10 @@ function run_pytest() { exit 2 fi TVM_FFI=${ffi_type} python3 -m pytest \ + --timeout=480 \ -o "junit_suite_name=${test_suite_name}-${ffi_type}" \ "--junit-xml=${TVM_PYTEST_RESULT_DIR}/${test_suite_name}-${ffi_type}.xml" \ "--junit-prefix=${ffi_type}" \ + "${extra_args[@]}" \ "$@" } diff --git a/tests/scripts/task_build.sh b/tests/scripts/task_build.sh index 845b7153ae20..f3b389d2a902 100755 --- a/tests/scripts/task_build.sh +++ b/tests/scripts/task_build.sh @@ -15,5 +15,18 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +set -eux + export VTA_HW_PATH=`pwd`/3rdparty/vta-hw -cd $1 && cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo && make $2 && cd .. +MAKE_ARG=( ) +if [ -n "${2+x}" ]; then + MAKE_ARG=( "${2}" ) +fi + +if [ -n "${CI_CPUSET_NUM_CPUS+x}" -a "a${MAKE_ARG[@]:+b}" == "a" ]; then + MAKE_ARG=( "-j${CI_CPUSET_NUM_CPUS}" ) +fi + +cd "$1" +cmake .. 
diff --git a/tests/scripts/task_ci_setup.sh b/tests/scripts/task_ci_setup.sh
index 01d5587e70ad..8a77d48edc4d 100755
--- a/tests/scripts/task_ci_setup.sh
+++ b/tests/scripts/task_ci_setup.sh
@@ -30,7 +30,7 @@ set -o pipefail
 #
 echo "Additional setup in" ${CI_IMAGE_NAME}
 
-python3 -m pip install --user tlcpack-sphinx-addon==0.2.1 synr==0.4.0
+python3 -m pip install --user tlcpack-sphinx-addon==0.2.1 synr==0.4.0 pytest-timeout
 
 # Rebuild standalone_crt in build/ tree. This file is not currently archived by pack_lib() in
 # Jenkinsfile. We expect config.cmake to be present from pack_lib().
diff --git a/tests/scripts/task_python_frontend.sh b/tests/scripts/task_python_frontend.sh
index a2f6d706a163..22f7cef33510 100755
--- a/tests/scripts/task_python_frontend.sh
+++ b/tests/scripts/task_python_frontend.sh
@@ -35,7 +35,7 @@ echo "Running relay MXNet frontend test..."
 run_pytest cython python-frontend-mxnet tests/python/frontend/mxnet
 
 echo "Running relay ONNX frontend test..."
-run_pytest cython python-frontend-onnx tests/python/frontend/onnx
+run_pytest --parallel cython python-frontend-onnx tests/python/frontend/onnx
 
 echo "Running relay CoreML frontend test..."
 run_pytest cython python-frontend-coreml tests/python/frontend/coreml
@@ -50,7 +50,7 @@ echo "Running relay DarkNet frontend test..."
 run_pytest cython python-frontend-darknet tests/python/frontend/darknet
 
 echo "Running relay PyTorch frontend test..."
-run_pytest cython python-frontend-pytorch tests/python/frontend/pytorch
+run_pytest --parallel cython python-frontend-pytorch tests/python/frontend/pytorch
 
 echo "Running relay PaddlePaddle frontend test..."
-run_pytest cython python-frontend-paddlepaddle tests/python/frontend/paddlepaddle
+run_pytest --parallel cython python-frontend-paddlepaddle tests/python/frontend/paddlepaddle
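Since run_pytest consumes --parallel before the FFI type, the ONNX line above expands to roughly the invocation below; the --timeout=480 flag added in setup-pytest-env.sh is also why task_ci_setup.sh now installs pytest-timeout:

    TVM_FFI=cython python3 -m pytest \
        --timeout=480 \
        -o "junit_suite_name=python-frontend-onnx-cython" \
        "--junit-xml=${TVM_PYTEST_RESULT_DIR}/python-frontend-onnx-cython.xml" \
        "--junit-prefix=cython" \
        -n "${PYTEST_NUM_CPUS}" \
        tests/python/frontend/onnx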
diff --git a/tests/scripts/task_python_integration.sh b/tests/scripts/task_python_integration.sh
index 00b63af48646..7979a5d74dd6 100755
--- a/tests/scripts/task_python_integration.sh
+++ b/tests/scripts/task_python_integration.sh
@@ -44,33 +44,36 @@ rm -rf lib
 make
 cd ../..
 
-run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-extensions apps/extension/tests
-run_pytest cython ${TVM_INTEGRATION_TESTSUITE_NAME}-extensions apps/extension/tests
+# Only run in parallel for CPU integration tests.
+PYTEST_PARALLEL="${PYTEST_PARALLEL:---parallel}"
+
+run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-extensions apps/extension/tests
+run_pytest "${PYTEST_PARALLEL}" cython ${TVM_INTEGRATION_TESTSUITE_NAME}-extensions apps/extension/tests
 
 # Test dso plugin
 cd apps/dso_plugin_module
 rm -rf lib
 make
 cd ../..
-run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-dso_plugin_module apps/dso_plugin_module
-run_pytest cython ${TVM_INTEGRATION_TESTSUITE_NAME}-dso_plugin_module apps/dso_plugin_module
+run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-dso_plugin_module apps/dso_plugin_module
+run_pytest "${PYTEST_PARALLEL}" cython ${TVM_INTEGRATION_TESTSUITE_NAME}-dso_plugin_module apps/dso_plugin_module
 
 # Do not enable TensorFlow op
 # TVM_FFI=cython sh prepare_and_test_tfop_module.sh
 # TVM_FFI=ctypes sh prepare_and_test_tfop_module.sh
 
-run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME} tests/python/integration
+run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME} tests/python/integration
 if python -c "import tvm; from tvm.relay.op.contrib.ethosn import ethosn_available; print(ethosn_available().name)" -eq "SW_ONLY"; then
-    ETHOSN_VARIANT_CONFIG=ETHOSN78_1TOPS_4PLE_448KSRAM run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib-test_ethosn tests/python/contrib/test_ethosn
+    ETHOSN_VARIANT_CONFIG=ETHOSN78_1TOPS_4PLE_448KSRAM run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib-test_ethosn tests/python/contrib/test_ethosn
 fi
-run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib tests/python/contrib
+run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib tests/python/contrib
 
 # forked is needed because the global registry gets contaminated
 TVM_TEST_TARGETS="${TVM_RELAY_TEST_TARGETS:-llvm;cuda}" \
-    run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-relay tests/python/relay
+    run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-relay tests/python/relay
 
 # Command line driver test
-run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-driver tests/python/driver
+run_pytest "${PYTEST_PARALLEL}" ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-driver tests/python/driver
 
 # Do not enable OpenGL
 # run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-webgl tests/webgl
diff --git a/tests/scripts/task_python_integration_gpuonly.sh b/tests/scripts/task_python_integration_gpuonly.sh
index ac09cb5a14a3..30a83a5d21f0 100755
--- a/tests/scripts/task_python_integration_gpuonly.sh
+++ b/tests/scripts/task_python_integration_gpuonly.sh
@@ -20,5 +20,6 @@ export TVM_TEST_TARGETS="cuda;opencl;metal;rocm;vulkan;nvptx;opencl -device=mali"
 export PYTEST_ADDOPTS="-m gpu $PYTEST_ADDOPTS"
 export TVM_RELAY_TEST_TARGETS="cuda"
 export TVM_INTEGRATION_TESTSUITE_NAME=python-integration-gpu
+export PYTEST_PARALLEL=
 
 ./tests/scripts/task_python_integration.sh
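A parameter-expansion subtlety worth double-checking in the two scripts above: ${PYTEST_PARALLEL:---parallel} uses the :- form, which substitutes the default when the variable is unset or empty, so the export PYTEST_PARALLEL= override in the GPU-only wrapper still expands to --parallel. The - form would honor a set-but-empty override, though run_pytest would then receive an empty first argument that it needs to tolerate. A minimal demonstration of the difference:

    unset PYTEST_PARALLEL
    echo "[${PYTEST_PARALLEL:---parallel}]"   # [--parallel]  (unset: default applies)
    export PYTEST_PARALLEL=
    echo "[${PYTEST_PARALLEL:---parallel}]"   # [--parallel]  (':-' fires on empty too)
    echo "[${PYTEST_PARALLEL---parallel}]"    # []            ('-' keeps the empty override)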
diff --git a/tests/scripts/task_python_unittest.sh b/tests/scripts/task_python_unittest.sh
index 54a36f6dcfd4..6c35042714a8 100755
--- a/tests/scripts/task_python_unittest.sh
+++ b/tests/scripts/task_python_unittest.sh
@@ -18,6 +18,7 @@
 
 set -e
 set -u
+set -x
 
 source tests/scripts/setup-pytest-env.sh
 
@@ -31,9 +32,9 @@ if [ -z "${TVM_UNITTEST_TESTSUITE_NAME:-}" ]; then
 fi
 
 # First run minimal test on both ctypes and cython.
-run_pytest ctypes ${TVM_UNITTEST_TESTSUITE_NAME}-platform-minimal-test tests/python/all-platform-minimal-test
-run_pytest cython ${TVM_UNITTEST_TESTSUITE_NAME}-platform-minimal-test tests/python/all-platform-minimal-test
+run_pytest --parallel ctypes ${TVM_UNITTEST_TESTSUITE_NAME}-platform-minimal-test tests/python/all-platform-minimal-test
+run_pytest --parallel cython ${TVM_UNITTEST_TESTSUITE_NAME}-platform-minimal-test tests/python/all-platform-minimal-test
 
 # Then run all unittests on both ctypes and cython.
-run_pytest ctypes ${TVM_UNITTEST_TESTSUITE_NAME} tests/python/unittest
-run_pytest cython ${TVM_UNITTEST_TESTSUITE_NAME} tests/python/unittest
+run_pytest --parallel ctypes ${TVM_UNITTEST_TESTSUITE_NAME} tests/python/unittest
+run_pytest --parallel cython ${TVM_UNITTEST_TESTSUITE_NAME} tests/python/unittest
diff --git a/tests/scripts/task_python_vta_fsim.sh b/tests/scripts/task_python_vta_fsim.sh
index 4074fb888351..14eb22965516 100755
--- a/tests/scripts/task_python_vta_fsim.sh
+++ b/tests/scripts/task_python_vta_fsim.sh
@@ -40,8 +40,8 @@ cp ${VTA_HW_PATH}/config/fsim_sample.json ${VTA_HW_PATH}/config/vta_config.json
 
 # Run unit tests in functional/fast simulator
 echo "Running unittest in fsim..."
-run_pytest cython python-vta-fsim-unittest ${TVM_PATH}/vta/tests/python/unittest
+run_pytest --parallel cython python-vta-fsim-unittest ${TVM_PATH}/vta/tests/python/unittest
 
 # Run unit tests in functional/fast simulator
 echo "Running integration test in fsim..."
-run_pytest cython python-vta-fsim-integration ${TVM_PATH}/vta/tests/python/integration
+run_pytest --parallel cython python-vta-fsim-integration ${TVM_PATH}/vta/tests/python/integration
diff --git a/tests/scripts/task_python_vta_tsim.sh b/tests/scripts/task_python_vta_tsim.sh
index 4c21f46c5f81..b0f7eab064d9 100755
--- a/tests/scripts/task_python_vta_tsim.sh
+++ b/tests/scripts/task_python_vta_tsim.sh
@@ -58,11 +58,11 @@ make -C ${VTA_HW_PATH}/hardware/chisel USE_THREADS=0 lib
 
 # Run unit tests in cycle accurate simulator
 echo "Running unittest in tsim..."
-run_pytest cython python-vta-tsim-unittest ${TVM_PATH}/vta/tests/python/unittest
+run_pytest --parallel cython python-vta-tsim-unittest ${TVM_PATH}/vta/tests/python/unittest
 
 # Run unit tests in cycle accurate simulator
 echo "Running integration test in tsim..."
-run_pytest cython python-vta-tsim-integration ${TVM_PATH}/vta/tests/python/integration
+run_pytest --parallel cython python-vta-tsim-integration ${TVM_PATH}/vta/tests/python/integration
 
 # Reset default fsim simulation
 cp ${VTA_HW_PATH}/config/fsim_sample.json ${VTA_HW_PATH}/config/vta_config.json
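Putting the pieces together, a CI-like parallel run can be reproduced locally through docker/bash.sh; the image tag below is illustrative (any ci_* image from docker/ follows the same path):

    # An explicit --cpuset-cpus skips the executor inference and pins the
    # container to CPUs 0-3; bash.sh then exports CI_CPUSET_CPUS=0-3 and
    # CI_CPUSET_NUM_CPUS=4, so run_pytest --parallel becomes "pytest -n 4"
    # and task_build.sh builds with make -j4.
    docker/bash.sh --cpuset-cpus=0-3 tlcpack/ci-cpu:v0.77 \
        ./tests/scripts/task_python_unittest.sh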