diff --git a/.gitattributes b/.gitattributes
index 22636f9..18f114a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,6 +1,6 @@
 * text=auto

-*.patch text
+*.patch binary
 *.diff binary
 meta.yaml text eol=lf
 build.sh text eol=lf
diff --git a/abs.yaml b/abs.yaml
index 8e1871b..0756ca3 100644
--- a/abs.yaml
+++ b/abs.yaml
@@ -2,7 +2,3 @@
 # variant, so it's specified for both.
 extra_labels_for_os:
   osx-arm64: [ventura]
-aggregate_check: false
-
-channels:
-  - https://staging.continuum.io/prefect/fs/sympy-feedstock/pr10/3afd78c
diff --git a/recipe/bld.bat b/recipe/bld.bat
index c4221a7..c228fa0 100644
--- a/recipe/bld.bat
+++ b/recipe/bld.bat
@@ -1,4 +1,5 @@
 @echo On
+setlocal enabledelayedexpansion

 :: The PyTorch test suite includes some symlinks, which aren't resolved on Windows, leading to packaging errors.
 :: ATTN! These change and have to be updated manually, often with each release.
@@ -6,9 +7,10 @@
 :: for a failure with error message: "conda_package_handling.exceptions.ArchiveCreationError: Cannot stat
 :: while writing file")

-set TH_BINARY_BUILD=1
 set PYTORCH_BUILD_VERSION=%PKG_VERSION%
-set PYTORCH_BUILD_NUMBER=%PKG_BUILDNUM%
+:: Always pass 0 to avoid appending ".post" to version string.
+:: https://github.com/conda-forge/pytorch-cpu-feedstock/issues/315
+set PYTORCH_BUILD_NUMBER=0

 :: uncomment to debug cmake build
 :: set CMAKE_VERBOSE_MAKEFILE=1
@@ -21,6 +23,12 @@ if "%pytorch_variant%" == "gpu" (
     set USE_CUDA=0
 )

+:: KINETO seems to require CUPTI and will look quite hard for it.
+:: CUPTI seems to cause trouble when users install a version of
+:: cudatoolkit different than the one specified at compile time.
+:: https://github.com/conda-forge/pytorch-cpu-feedstock/issues/135
+set "USE_KINETO=OFF"
+
 :: =============================== CUDA FLAGS> ======================================
 if "%build_with_cuda%" == "" goto cuda_flags_end

@@ -41,6 +49,7 @@ set USE_MKLDNN=1
 set USE_TENSORPIPE=0
 set DISTUTILS_USE_SDK=1
 set BUILD_TEST=0
+set INSTALL_TEST=0
 :: Don't increase MAX_JOBS to NUMBER_OF_PROCESSORS, as it will run out of heap
 set CPU_COUNT=1
 set MAX_JOBS=%CPU_COUNT%
@@ -64,9 +73,12 @@ set CUDNN_INCLUDE_DIR=%LIBRARY_PREFIX%\include
 :: =============================== CUDA< ======================================

 set CMAKE_GENERATOR=Ninja
+set "CMAKE_GENERATOR_TOOLSET="
 set "CMAKE_GENERATOR_PLATFORM="
 set "CMAKE_PREFIX_PATH=%LIBRARY_PREFIX%"
-set CMAKE_BUILD_TYPE=Release
+set "CMAKE_INCLUDE_PATH=%LIBRARY_INC%"
+set "CMAKE_LIBRARY_PATH=%LIBRARY_LIB%"
+set "CMAKE_BUILD_TYPE=Release"
 :: This is so that CMake finds the environment's Python, not another one
 set Python_EXECUTABLE=%PYTHON%
 set Python3_EXECUTABLE=%PYTHON%
@@ -81,10 +93,72 @@ set BLAS=MKL
 set INTEL_MKL_DIR=%LIBRARY_PREFIX%

 set "libuv_ROOT=%LIBRARY_PREFIX%"
-set "USE_SYSTEM_SLEEF=OFF"
-:: Note that BUILD_CUSTOM_PROTOBUF=OFF (which would use our protobuf) doesn't work properly as of last testing, and results in
-:: duplicate symbols at link time.
-:: set "BUILD_CUSTOM_PROTOBUF=OFF"
+set "USE_SYSTEM_SLEEF=ON"
+
+:: Use our protobuf
+set "BUILD_CUSTOM_PROTOBUF=OFF"
+set "USE_LITE_PROTO=ON"
+
+:: Here we split the build into two parts.
+::
+:: Both the packages libtorch and pytorch use this same build script.
+:: - The output of the libtorch package should just contain the binaries that are
+::   not related to Python.
+:: - The output of the pytorch package contains everything except for the
+::   non-python specific binaries.
+::
+:: This ensures that a user can quickly switch between python versions without the
+:: need to redownload all the large CUDA binaries.
+
+if "%PKG_NAME%" == "libtorch" (
+    :: For the main script we just build a wheel for libtorch so that the C++/CUDA
+    :: parts are built. Then they are reused in each python version.
+
+    %PYTHON% setup.py bdist_wheel
+    :: Extract the compiled wheel into a temporary directory
+    if not exist "%SRC_DIR%/dist" mkdir %SRC_DIR%/dist
+    pushd %SRC_DIR%/dist
+    for %%f in (../torch-*.whl) do (
+        wheel unpack %%f
+    )
+
+    :: Navigate into the unpacked wheel
+    pushd torch-*
+
+    :: Move the binaries into the package's site-packages directory
+    robocopy /NP /NFL /NDL /NJH /E torch\bin %SP_DIR%\torch\bin\
+    robocopy /NP /NFL /NDL /NJH /E torch\lib %SP_DIR%\torch\lib\
+    robocopy /NP /NFL /NDL /NJH /E torch\share %SP_DIR%\torch\share\
+    for %%f in (ATen caffe2 torch c10) do (
+        robocopy /NP /NFL /NDL /NJH /E torch\include\%%f %SP_DIR%\torch\include\%%f\
+    )
+
+    :: Remove the python binary file that is placed in the site-packages
+    :: directory by the python-specific pytorch package.
+    del %SP_DIR%\torch\lib\torch_python.*
+
+    popd
+    popd
+) else (
+    :: NOTE: Passing --cmake is necessary here since the torch frontend has its
+    :: own cmake files that it needs to generate
+    %PYTHON% setup.py clean
+    %PYTHON% setup.py bdist_wheel --cmake
+    %PYTHON% -m pip install --find-links=dist torch --no-build-isolation --no-deps
+    rmdir /s /q %SP_DIR%\torch\bin
+    rmdir /s /q %SP_DIR%\torch\share
+    for %%f in (ATen caffe2 torch c10) do (
+        rmdir /s /q %SP_DIR%\torch\include\%%f
+    )
+
+    :: Delete all files from the lib directory that do not start with torch_python
+    for %%f in (%SP_DIR%\torch\lib\*) do (
+        set "FILENAME=%%~nf"
+        if "!FILENAME:~0,12!" neq "torch_python" (
+            del %%f
+        )
+    )
+)

-%PYTHON% -m pip install . --no-deps --no-build-isolation -vv
 if errorlevel 1 exit /b 1
+
diff --git a/recipe/build_pytorch.bat b/recipe/build_pytorch.bat
index 88ba130..5b75a8d 100644
--- a/recipe/build_pytorch.bat
+++ b/recipe/build_pytorch.bat
@@ -1 +1,5 @@
+@echo On
+setlocal enabledelayedexpansion
+
 call %RECIPE_DIR%\bld.bat
+if errorlevel 1 exit /b 1
\ No newline at end of file
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index 638bdcc..21f54c7 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -57,7 +57,8 @@ source:
 {% endif %}
   - url: https://raw.githubusercontent.com/pytorch/builder/{{ smoke_test_commit }}/test/smoke_test/smoke_test.py
     folder: smoke_test
-
+  # The .gitignore is needed in order to run upstream's `setup.py clean`
+  - url: https://raw.githubusercontent.com/pytorch/pytorch/refs/tags/v{{ version }}/.gitignore

 build:
   number: {{ build }}
@@ -75,7 +76,6 @@
     - python *  # [megabuild]
     - numpy *   # [megabuild]
   skip: True  # [py<39]
-  skip: True  # [win]

 requirements:
   # Keep this list synchronized (except for python*, numpy*) in outputs
@@ -114,12 +114,13 @@ requirements:
     # This has a strong run_export so we don't need to put it in `host` or `run`
     # We use llvm-openmp for openblas variants on osx.
     - llvm-openmp 14.0.6  # [osx and not (blas_impl == "mkl")]
+    - libuv  # [win]
     - cmake
     - ninja-base
     # Keep libprotobuf here so that a compatibile version
     # of protobuf is installed between build and host
-    - libprotobuf  # [not win]
-    - protobuf     # [not win]
+    - libprotobuf
+    - protobuf
     - make  # [linux]
     # Uncomment to use ccache, see README and build_pytorch.sh
     # - ccache
@@ -147,15 +148,16 @@
     # other requirements
     - python 3.12  # [megabuild]
     - python       # [not megabuild]
-    - numpy 2.*
+    - numpy 2
     - pip
-    - setuptools
+    # Upper bound due to https://github.com/pytorch/pytorch/issues/136541
+    - setuptools <=72.1.0
     - wheel
     - pyyaml
     - requests
     - future
     - six
-    - mkl-devel {{ mkl }}.*  # [blas_impl == "mkl"]
+    - mkl-devel {{ mkl }}  # [blas_impl == "mkl"]
     - openblas-devel {{ openblas }}  # [blas_impl == "openblas"]
     # - libcblas * *_mkl  # [blas_impl == "mkl"]
     # - libcblas  # [blas_impl != "mkl"]
@@ -167,8 +169,8 @@
     - intel-openmp {{ mkl }}  # [blas_impl == "mkl"]
     - llvm-openmp 14.0.6  # [osx and not (blas_impl == "mkl")]
     - libabseil
-    - libprotobuf {{ libprotobuf }}  # [not win]
-    - sleef 3.5.1  # [not win]
+    - libprotobuf {{ libprotobuf }}
+    - sleef 3.5.1
     - typing
     - libuv
     - pkg-config  # [unix]
@@ -180,6 +182,7 @@
     # satisfy overlinking checks
   run:
     - {{ pin_compatible('intel-openmp') }}  # [blas_impl == "mkl"]
+    - libuv  # [win]

 # these tests are for the libtorch output below, but due to
 # a particularity of conda-build, that output is defined in
@@ -199,6 +202,13 @@ outputs:
   - name: libtorch
     build:
       missing_dso_whitelist:
+        # These are dynamically loaded from %SP_DIR%\torch\lib\
+        - "**/asmjit.dll"  # [win]
+        - "**/c10.dll"  # [win]
+        - "**/fbgemm.dll"  # [win]
+        - "**/shm.dll"  # [win]
+        - "**/torch_cpu.dll"  # [win]
+        - "**/torch_python.dll"  # [win]
         - $RPATH/ld64.so.1  # [s390x]
   - name: pytorch
     build:
@@ -210,13 +220,19 @@
       ignore_run_exports:  # [osx]
         - libuv  # [osx]
       missing_dso_whitelist:
+        # These are dynamically loaded from %SP_DIR%\torch\lib\
+        - "**/asmjit.dll"  # [win]
+        - "**/c10.dll"  # [win]
+        - "**/fbgemm.dll"  # [win]
+        - "**/shm.dll"  # [win]
+        - "**/torch_cpu.dll"  # [win]
+        - "**/torch_python.dll"  # [win]
         - $RPATH/ld64.so.1  # [s390x]
       detect_binary_files_with_prefix: false
       run_exports:
         - {{ pin_subpackage('pytorch', max_pin='x.x') }}
         - {{ pin_subpackage('libtorch', max_pin='x.x') }}
       skip: True  # [py<39]
-      skip: True  # [win]
       script: build_pytorch.sh  # [unix]
       script: build_pytorch.bat  # [win]

@@ -256,8 +272,8 @@
         - ninja-base
         # Keep libprotobuf here so that a compatibile version
         # of protobuf is installed between build and host
-        - libprotobuf  # [not win]
-        - protobuf     # [not win]
+        - libprotobuf
+        - protobuf
         - make  # [linux]
         # Uncomment to use ccache, see README and build_pytorch.sh
         # - ccache
@@ -283,15 +299,15 @@
 {% endif %}
         # other requirements
         - python
-        - numpy 2.*
+        - numpy 2
         - pip
-        - setuptools
+        - setuptools <=72.1.0
         - wheel
         - pyyaml
         - requests
         - future
         - six
-        - mkl-devel {{ mkl }}.*  # [blas_impl == "mkl"]
+        - mkl-devel {{ mkl }}  # [blas_impl == "mkl"]
         - openblas-devel {{ openblas }}  # [blas_impl == "openblas"]
         # - libcblas * *_mkl  # [blas_impl == "mkl"]
         # - libcblas  # [blas_impl != "mkl"]
@@ -303,8 +319,8 @@
         - intel-openmp {{ mkl }}  # [blas_impl == "mkl"]
         - llvm-openmp 14.0.6  # [osx and not (blas_impl == "mkl")]
         - libabseil
-        - libprotobuf {{ libprotobuf }}  # [not win]
-        - sleef 3.5.1  # [not win]
+        - libprotobuf {{ libprotobuf }}
+        - sleef 3.5.1
         - typing
         - libuv
         - pkg-config  # [unix]
@@ -371,6 +387,8 @@
         - pytest-rerunfailures
         - pytest-flakefinder
         - pytest-xdist
+        # Needed for test_autograd.py
+        - pybind11
       imports:
         - torch
       source_files:
@@ -379,6 +397,8 @@
         # as of pytorch=2.0.0, there is a bug when trying to run tests without the tools
         - tools
         - smoke_test
+        # See files needed: https://github.com/pytorch/pytorch/blob/main/test/test_ops.py#L271-L274
+        - aten/src/ATen/native
       commands:
         # the smoke test script takes a bunch of env variables, defined below
         - set MATRIX_GPU_ARCH_VERSION="{{ '.'.join(cudatoolkit.split('.')[:2]) }}"  # [(gpu_variant == "cuda-11") and (win)]