From 2432b6c57f614aff3c3ba2154ce09ee57c1ba354 Mon Sep 17 00:00:00 2001
From: ptrblck
Date: Wed, 29 Mar 2023 14:03:50 -0700
Subject: [PATCH] remove CUDA 11.6 builds (#1366)

* remove CUDA 11.6 builds

* remove more 11.6 builds
---
 .github/workflows/build-libtorch-images.yml  |  2 +-
 .github/workflows/build-manywheel-images.yml |  2 +-
 CUDA_UPGRADE_GUIDE.MD                        |  4 +-
 common/install_cuda.sh                       | 56 +------------------
 conda/Dockerfile                             |  5 --
 conda/build_pytorch.sh                       |  3 -
 conda/pytorch-cuda/conda_build_config.yaml   |  1 -
 conda/pytorch-cuda/meta.yaml                 |  8 ---
 conda/pytorch-nightly/bld.bat                |  6 --
 conda/pytorch-nightly/build.sh               |  9 +--
 libtorch/Dockerfile                          |  4 --
 libtorch/build_all_docker.sh                 |  2 +-
 manywheel/build_all_docker.sh                |  2 +-
 manywheel/build_cuda.sh                      | 36 +-----
 windows/cuda116.bat                          | 58 --------------------
 windows/internal/cuda_install.bat            | 28 ----------
 windows/internal/smoke_test.bat              |  3 -
 17 files changed, 9 insertions(+), 220 deletions(-)
 delete mode 100644 windows/cuda116.bat

diff --git a/.github/workflows/build-libtorch-images.yml b/.github/workflows/build-libtorch-images.yml
index 49069557a..e2493679a 100644
--- a/.github/workflows/build-libtorch-images.yml
+++ b/.github/workflows/build-libtorch-images.yml
@@ -28,7 +28,7 @@ jobs:
     runs-on: ubuntu-22.04
     strategy:
       matrix:
-        cuda_version: ["11.8", "11.7", "11.6"]
+        cuda_version: ["11.8", "11.7"]
     env:
       GPU_ARCH_TYPE: cuda
       GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
diff --git a/.github/workflows/build-manywheel-images.yml b/.github/workflows/build-manywheel-images.yml
index 153f501bc..18511f027 100644
--- a/.github/workflows/build-manywheel-images.yml
+++ b/.github/workflows/build-manywheel-images.yml
@@ -30,7 +30,7 @@ jobs:
     runs-on: ubuntu-22.04
     strategy:
       matrix:
-        cuda_version: ["11.8", "11.7", "11.6"]
+        cuda_version: ["11.8", "11.7"]
     env:
       GPU_ARCH_TYPE: cuda
       GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
diff --git a/CUDA_UPGRADE_GUIDE.MD b/CUDA_UPGRADE_GUIDE.MD
index ae3f158d3..02575d4c7 100644
--- a/CUDA_UPGRADE_GUIDE.MD
+++ b/CUDA_UPGRADE_GUIDE.MD
@@ -9,8 +9,8 @@ Here is the supported matrix for CUDA and CUDNN

 | CUDA | CUDNN | additional details |
 | --- | --- | --- |
-| 11.6 | 8.3.2.44 | Stable CUDA Release |
-| 11.7 | 8.5.0.96 | Latest CUDA Release |
+| 11.7 | 8.5.0.96 | Stable CUDA Release |
+| 11.8 | 8.7.0.84 | Latest CUDA Release |

 ### B. Check the package availability
diff --git a/common/install_cuda.sh b/common/install_cuda.sh
index 27d4b0c1c..ca432678d 100644
--- a/common/install_cuda.sh
+++ b/common/install_cuda.sh
@@ -2,27 +2,6 @@

 set -ex

-function install_116 {
-    echo "Installing CUDA 11.6 and CuDNN 8.3"
-    rm -rf /usr/local/cuda-11.6 /usr/local/cuda
-    # install CUDA 11.6.2 in the same container
-    wget -q https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda_11.6.2_510.47.03_linux.run
-    chmod +x cuda_11.6.2_510.47.03_linux.run
-    ./cuda_11.6.2_510.47.03_linux.run --toolkit --silent
-    rm -f cuda_11.6.2_510.47.03_linux.run
-    rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.6 /usr/local/cuda
-
-    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-    mkdir tmp_cudnn && cd tmp_cudnn
-    wget -q https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz -O cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz
-    tar xf cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz
-    cp -a cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive/include/* /usr/local/cuda/include/
-    cp -a cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive/lib/* /usr/local/cuda/lib64/
-    cd ..
-    rm -rf tmp_cudnn
-    ldconfig
-}
-
 function install_117 {
     echo "Installing CUDA 11.7 and CuDNN 8.5 and NCCL 2.14"
     rm -rf /usr/local/cuda-11.7 /usr/local/cuda
@@ -116,37 +95,6 @@ function install_121 {
     ldconfig
 }

-function prune_116 {
-    echo "Pruning CUDA 11.6 and CuDNN"
-    #####################################################################################
-    # CUDA 11.6 prune static libs
-    #####################################################################################
-    export NVPRUNE="/usr/local/cuda-11.6/bin/nvprune"
-    export CUDA_LIB_DIR="/usr/local/cuda-11.6/lib64"
-
-    export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86"
-    export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86"
-
-    if [[ -n "$OVERRIDE_GENCODE" ]]; then
-        export GENCODE=$OVERRIDE_GENCODE
-    fi
-
-    # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included)
-    ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
-        | xargs -I {} bash -c \
-            "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
-
-    # prune CuDNN and CuBLAS
-    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
-    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
-
-    #####################################################################################
-    # CUDA 11.6 prune visual tools
-    #####################################################################################
-    export CUDA_BASE="/usr/local/cuda-11.6/"
-    rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.1.1 $CUDA_BASE/nsight-systems-2021.5.2
-}
-
 function prune_117 {
     echo "Pruning CUDA 11.7 and CuDNN"
     #####################################################################################
@@ -172,7 +120,7 @@ function prune_117 {
     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

     #####################################################################################
-    # CUDA 11.6 prune visual tools
+    # CUDA 11.7 prune visual tools
     #####################################################################################
     export CUDA_BASE="/usr/local/cuda-11.7/"
     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.2.0 $CUDA_BASE/nsight-systems-2022.1.3
@@ -244,8 +192,6 @@ function prune_121 {
 while test $# -gt 0
 do
     case "$1" in
-    11.6) install_116; prune_116
-        ;;
     11.7) install_117; prune_117
         ;;
     11.8) install_118; prune_118
diff --git a/conda/Dockerfile b/conda/Dockerfile
index a58a28511..5954f5b13 100644
--- a/conda/Dockerfile
+++ b/conda/Dockerfile
@@ -48,10 +48,6 @@ ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION}
 # Make things in our path by default
 ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH

-FROM cuda as cuda11.6
-RUN bash ./install_cuda.sh 11.6
-ENV DESIRED_CUDA=11.6
-
 FROM cuda as cuda11.7
 RUN bash ./install_cuda.sh 11.7
 ENV DESIRED_CUDA=11.7
@@ -70,7 +66,6 @@ ADD ./common/install_mnist.sh install_mnist.sh
 RUN bash ./install_mnist.sh

 FROM base as all_cuda
-COPY --from=cuda11.6 /usr/local/cuda-11.6 /usr/local/cuda-11.6
 COPY --from=cuda11.7 /usr/local/cuda-11.7 /usr/local/cuda-11.7
 COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
 COPY --from=cuda12.1 /usr/local/cuda-12.1 /usr/local/cuda-12.1
diff --git a/conda/build_pytorch.sh b/conda/build_pytorch.sh
index 30986b408..570c6df11 100755
--- a/conda/build_pytorch.sh
+++ b/conda/build_pytorch.sh
@@ -271,9 +271,6 @@ else
     elif [[ "$desired_cuda" == "11.7" ]]; then
         export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=11.7,<11.8 # [not osx]"
         export MAGMA_PACKAGE=" - magma-cuda117 # [not osx and not win]"
-    elif [[ "$desired_cuda" == "11.6" ]]; then
-        export CONDA_CUDATOOLKIT_CONSTRAINT=" - pytorch-cuda >=11.6,<11.7 # [not osx]"
-        export MAGMA_PACKAGE=" - magma-cuda116 # [not osx and not win]"
     else
         echo "unhandled desired_cuda: $desired_cuda"
         exit 1
diff --git a/conda/pytorch-cuda/conda_build_config.yaml b/conda/pytorch-cuda/conda_build_config.yaml
index 67d14f2b1..a343bc40c 100644
--- a/conda/pytorch-cuda/conda_build_config.yaml
+++ b/conda/pytorch-cuda/conda_build_config.yaml
@@ -1,5 +1,4 @@
 version:
-  - 11.6
   - 11.7
   - 11.8
 target_platform:
diff --git a/conda/pytorch-cuda/meta.yaml b/conda/pytorch-cuda/meta.yaml
index ecb438ca8..4c3ff4d8a 100644
--- a/conda/pytorch-cuda/meta.yaml
+++ b/conda/pytorch-cuda/meta.yaml
@@ -12,14 +12,6 @@
 # https://conda.anaconda.org/pytorch/noarch/
 # https://conda.anaconda.org/pytorch/noarch/repodata.json
 {% set build = 3 %}
-{% set cuda_constraints=">=11.6,<11.7" %}
-{% set libcufft_constraints=">=10.7.0.55,<10.7.2.50" %}
-{% set libcublas_constraints=">=11.8.1.74,<11.10.1.25" %}
-{% set libcusolver_constraints=">=11.3.2.55,<11.3.5.50" %}
-{% set libcusparse_constraints=">=11.7.1.55,<11.7.3.50" %}
-{% set libnpp_constraints=">=11.6.0.55,<11.7.3.21" %}
-{% set libnvjpeg_constraints=">=11.6.0.55,<11.7.2.34" %}
-{% if version == '11.7' %}
 {% set cuda_constraints=">=11.7,<11.8" %}
 {% set libcufft_constraints=">=10.7.2.50,<10.9.0.58" %}
 {% set libcublas_constraints=">=11.10.1.25,<11.11.3.6" %}
diff --git a/conda/pytorch-nightly/bld.bat b/conda/pytorch-nightly/bld.bat
index 18850f758..1b0909f13 100644
--- a/conda/pytorch-nightly/bld.bat
+++ b/conda/pytorch-nightly/bld.bat
@@ -21,12 +21,6 @@ set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%desired_cuda%
 set CUDA_BIN_PATH=%CUDA_PATH%\bin
 set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
 set TORCH_CUDA_ARCH_LIST=3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6
-if "%desired_cuda%" == "11.5" (
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all --threads 2
-)
-if "%desired_cuda%" == "11.6" (
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all --threads 2
-)
 if "%desired_cuda%" == "11.7" (
     set TORCH_NVCC_FLAGS=-Xfatbin -compress-all --threads 2
 )
diff --git a/conda/pytorch-nightly/build.sh b/conda/pytorch-nightly/build.sh
index ad1871ac4..14ed57276 100755
--- a/conda/pytorch-nightly/build.sh
+++ b/conda/pytorch-nightly/build.sh
@@ -55,14 +55,7 @@ if [[ -n "$build_with_cuda" ]]; then
     TORCH_CUDA_ARCH_LIST="3.7+PTX;5.0"
     export USE_STATIC_CUDNN=1 # links cudnn statically (driven by tools/setup_helpers/cudnn.py)

-    if [[ $CUDA_VERSION == 11.6* ]]; then
-        TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0;7.5;8.0;8.6"
-        #for cuda 11.5 we use cudnn 8.3.2.44 https://docs.nvidia.com/deeplearning/cudnn/release-notes/rel_8.html
-        #which does not have single static libcudnn_static.a deliverable to link with
-        export USE_STATIC_CUDNN=0
-        #for cuda 11.5 include all dynamic loading libraries
-        DEPS_LIST=(/usr/local/cuda/lib64/libcudnn*.so.8 /usr/local/cuda-11.6/extras/CUPTI/lib64/libcupti.so.11.6)
-    elif [[ $CUDA_VERSION == 11.7* ]]; then
+    if [[ $CUDA_VERSION == 11.7* ]]; then
         TORCH_CUDA_ARCH_LIST="$TORCH_CUDA_ARCH_LIST;6.0;6.1;7.0;7.5;8.0;8.6"
         #for cuda 11.7 we use cudnn 8.5
         #which does not have single static libcudnn_static.a deliverable to link with
diff --git a/libtorch/Dockerfile b/libtorch/Dockerfile
index 3a116f8b5..6a461b808 100644
--- a/libtorch/Dockerfile
+++ b/libtorch/Dockerfile
@@ -44,10 +44,6 @@ ADD ./common/install_conda.sh install_conda.sh
 RUN bash ./install_conda.sh && rm install_conda.sh
 RUN /opt/conda/bin/conda install -y cmake=3.18

-FROM cuda as cuda11.6
-RUN bash ./install_cuda.sh 11.6
-RUN bash ./install_magma.sh 11.6
-
 FROM cuda as cuda11.7
 RUN bash ./install_cuda.sh 11.7
 RUN bash ./install_magma.sh 11.7
diff --git a/libtorch/build_all_docker.sh b/libtorch/build_all_docker.sh
index 8d25da9bc..bebb1728a 100755
--- a/libtorch/build_all_docker.sh
+++ b/libtorch/build_all_docker.sh
@@ -4,7 +4,7 @@ set -eou pipefail

 TOPDIR=$(git rev-parse --show-toplevel)

-for cuda_version in 11.8 11.7 11.6; do
+for cuda_version in 11.8 11.7; do
   GPU_ARCH_TYPE=cuda GPU_ARCH_VERSION="${cuda_version}" "${TOPDIR}/libtorch/build_docker.sh"
 done
diff --git a/manywheel/build_all_docker.sh b/manywheel/build_all_docker.sh
index 395f71be3..7a695f51c 100644
--- a/manywheel/build_all_docker.sh
+++ b/manywheel/build_all_docker.sh
@@ -9,7 +9,7 @@ MANYLINUX_VERSION=2014 GPU_ARCH_TYPE=cpu "${TOPDIR}/manywheel/build_docker.sh"
 GPU_ARCH_TYPE=cpu-cxx11-abi "${TOPDIR}/manywheel/build_docker.sh"

-for cuda_version in 11.7 11.6; do
+for cuda_version in 11.8 11.7; do
   GPU_ARCH_TYPE=cuda GPU_ARCH_VERSION="${cuda_version}" "${TOPDIR}/manywheel/build_docker.sh"
   MANYLINUX_VERSION=2014 GPU_ARCH_TYPE=cuda GPU_ARCH_VERSION="${cuda_version}" "${TOPDIR}/manywheel/build_docker.sh"
 done
diff --git a/manywheel/build_cuda.sh b/manywheel/build_cuda.sh
index 6b5cd9111..83d3c6a3e 100644
--- a/manywheel/build_cuda.sh
+++ b/manywheel/build_cuda.sh
@@ -108,41 +108,7 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
     LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1"
 fi

-if [[ $CUDA_VERSION == "11.6" ]]; then
-export USE_STATIC_CUDNN=0
-DEPS_LIST=(
-    "/usr/local/cuda/lib64/libcudart.so.11.0"
-    "/usr/local/cuda/lib64/libnvToolsExt.so.1"
-    "/usr/local/cuda/lib64/libnvrtc.so.11.2" # this is not a mistake for 11.6
-    "/usr/local/cuda/lib64/libnvrtc-builtins.so.11.6"
-    "/usr/local/cuda/lib64/libcudnn_adv_infer.so.8"
-    "/usr/local/cuda/lib64/libcudnn_adv_train.so.8"
-    "/usr/local/cuda/lib64/libcudnn_cnn_infer.so.8"
-    "/usr/local/cuda/lib64/libcudnn_cnn_train.so.8"
-    "/usr/local/cuda/lib64/libcudnn_ops_infer.so.8"
-    "/usr/local/cuda/lib64/libcudnn_ops_train.so.8"
-    "/usr/local/cuda/lib64/libcudnn.so.8"
-    "/usr/local/cuda/lib64/libcublas.so.11"
-    "/usr/local/cuda/lib64/libcublasLt.so.11"
-    "$LIBGOMP_PATH"
-)
-DEPS_SONAME=(
-    "libcudart.so.11.0"
-    "libnvToolsExt.so.1"
-    "libnvrtc.so.11.2"
-    "libnvrtc-builtins.so.11.6"
-    "libcudnn_adv_infer.so.8"
-    "libcudnn_adv_train.so.8"
-    "libcudnn_cnn_infer.so.8"
-    "libcudnn_cnn_train.so.8"
-    "libcudnn_ops_infer.so.8"
-    "libcudnn_ops_train.so.8"
-    "libcudnn.so.8"
-    "libcublas.so.11"
-    "libcublasLt.so.11"
-    "libgomp.so.1"
-)
-elif [[ $CUDA_VERSION == "11.7" || $CUDA_VERSION == "11.8" ]]; then
+if [[ $CUDA_VERSION == "11.7" || $CUDA_VERSION == "11.8" ]]; then
 export USE_STATIC_CUDNN=0
 # Try parallelizing nvcc as well
 export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
diff --git a/windows/cuda116.bat b/windows/cuda116.bat
deleted file mode 100644
index 7a1613c5c..000000000
--- a/windows/cuda116.bat
+++ /dev/null
@@ -1,58 +0,0 @@
-@echo off
-
-set MODULE_NAME=pytorch
-
-IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
-    call internal\clone.bat
-    cd ..
-) ELSE (
-    call internal\clean.bat
-)
-IF ERRORLEVEL 1 goto :eof
-
-call internal\check_deps.bat
-IF ERRORLEVEL 1 goto :eof
-
-REM Check for optional components
-
-set USE_CUDA=
-set CMAKE_GENERATOR=Visual Studio 15 2017 Win64
-
-IF "%NVTOOLSEXT_PATH%"=="" (
-    IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
-        set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
-    ) ELSE (
-        echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
-        exit /b 1
-    )
-)
-
-IF "%CUDA_PATH_V116%"=="" (
-    IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin\nvcc.exe" (
-        set "CUDA_PATH_V116=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6"
-    ) ELSE (
-        echo CUDA 11.6 not found, failing
-        exit /b 1
-    )
-)
-
-IF "%BUILD_VISION%" == "" (
-    set TORCH_CUDA_ARCH_LIST=3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
-) ELSE (
-    set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86
-)
-
-set "CUDA_PATH=%CUDA_PATH_V116%"
-set "PATH=%CUDA_PATH_V116%\bin;%PATH%"
-
-:optcheck
-
-call internal\check_opts.bat
-IF ERRORLEVEL 1 goto :eof
-
-call internal\copy.bat
-IF ERRORLEVEL 1 goto :eof
-
-call internal\setup.bat
-IF ERRORLEVEL 1 goto :eof
diff --git a/windows/internal/cuda_install.bat b/windows/internal/cuda_install.bat
index b4f11a58a..b38821f60 100644
--- a/windows/internal/cuda_install.bat
+++ b/windows/internal/cuda_install.bat
@@ -19,40 +19,12 @@ set CUDNN_LIB_FOLDER="lib\x64"
 :: Skip all of this if we already have cuda installed
 if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars

-if %CUDA_VER% EQU 116 goto cuda116
 if %CUDA_VER% EQU 117 goto cuda117
 if %CUDA_VER% EQU 118 goto cuda118

 echo CUDA %CUDA_VERSION_STR% is not supported
 exit /b 1

-:cuda116
-
-set CUDA_INSTALL_EXE=cuda_11.6.0_511.23_windows.exe
-if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
-    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
-    if errorlevel 1 exit /b 1
-    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
-    set "ARGS=thrust_11.6 nvcc_11.6 cuobjdump_11.6 nvprune_11.6 nvprof_11.6 cupti_11.6 cublas_11.6 cublas_dev_11.6 cudart_11.6 cufft_11.6 cufft_dev_11.6 curand_11.6 curand_dev_11.6 cusolver_11.6 cusolver_dev_11.6 cusparse_11.6 cusparse_dev_11.6 npp_11.6 npp_dev_11.6 nvrtc_11.6 nvrtc_dev_11.6 nvml_dev_11.6"
-)
-
-set CUDNN_FOLDER=cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive
-set CUDNN_LIB_FOLDER="lib"
-set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
-if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
-    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
-    if errorlevel 1 exit /b 1
-    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
-)
-
-@REM Cuda 8.3+ required zlib to be installed on the path
-echo Installing ZLIB dlls
-curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
-7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
-xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"
-
-goto cuda_common
-
 :cuda117

 set CUDA_INSTALL_EXE=cuda_11.7.0_516.01_windows.exe
diff --git a/windows/internal/smoke_test.bat b/windows/internal/smoke_test.bat
index 2e1b1b243..afe285716 100644
--- a/windows/internal/smoke_test.bat
+++ b/windows/internal/smoke_test.bat
@@ -65,9 +65,6 @@ set "CONDA_HOME=%CD%\conda"
 set "tmp_conda=%CONDA_HOME%"
 set "miniconda_exe=%CD%\miniconda.exe"
 set "CONDA_EXTRA_ARGS=cpuonly -c pytorch-nightly"
-if "%CUDA_VERSION%" == "116" (
-    set "CONDA_EXTRA_ARGS=pytorch-cuda=11.6 -c nvidia -c pytorch-nightly"
-)
 if "%CUDA_VERSION%" == "117" (
     set "CONDA_EXTRA_ARGS=pytorch-cuda=11.7 -c nvidia -c pytorch-nightly"
 )