update to 1.12.0
* Update Python dependencies following upstream [1].
* Rebase patches for devendoring after upstream changes [2].
* Avoid wheel.vendored, which upstream has required since [3] but which is
  unavailable on Arch because wheel is devendored there [4] (see the sketch below).

[1] microsoft/onnxruntime#11522
[2] microsoft/onnxruntime#11146
[3] microsoft/onnxruntime#11834
[4] archlinux/svntogit-community@e691288
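
A minimal sketch of the setup.py change referenced above (not part of the commit itself): the system python-packaging module exposes the same sys_tags() that upstream imports from wheel.vendored.packaging.tags, so the import can be swapped without wheel's bundled copy. The tag value in the comment is only an example.

# Sketch: packaging.tags.sys_tags() stands in for wheel.vendored.packaging.tags.sys_tags()
from packaging.tags import sys_tags  # provided by python-packaging, no vendored wheel needed

# setup.py uses the first (most specific) tag to name the built wheel,
# e.g. cp310-cp310-linux_x86_64 on a current Arch system (example value).
tag = next(iter(sys_tags()))
print(f"{tag.interpreter}-{tag.abi}-{tag.platform}")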
yan12125 committed Jul 23, 2022
1 parent 8d347be commit c6eddeb
Showing 4 changed files with 50 additions and 44 deletions.
20 changes: 10 additions & 10 deletions .SRCINFO
@@ -1,6 +1,6 @@
 pkgbase = python-onnxruntime
 pkgdesc = Cross-platform, high performance scoring engine for ML models
-pkgver = 1.11.1
+pkgver = 1.12.0
 pkgrel = 1
 url = https://github.com/microsoft/onnxruntime
 arch = x86_64
@@ -20,26 +20,26 @@ pkgbase = python-onnxruntime
 makedepends = gcc11
 depends = nsync
 depends = re2
-depends = python-flatbuffers
-depends = python-numpy
-depends = python-protobuf
 depends = openmpi
 depends = onednn
 depends = libprotobuf-lite.so
+depends = python-coloredlogs
+depends = python-flatbuffers
+depends = python-numpy
+depends = python-packaging
+depends = python-protobuf
+depends = python-sympy
 optdepends = python-onnx: for the backend API, quantization, orttraining, transformers and various tools
-optdepends = python-coloredlogs: for transformers
 optdepends = python-psutil: for transformers
 optdepends = python-py-cpuinfo: for transformers
 optdepends = python-py3nvml: for transformers
-optdepends = python-packaging: for transformers and various tools
 optdepends = python-transformers: for transformers
 optdepends = python-scipy: for transformers and various tools
 optdepends = python-pytorch: for transformers, orttraining and various tools
 optdepends = python-cerberus: for orttraining
 optdepends = python-h5py: for orttraining
-optdepends = python-sympy: for transformers and various tools
 options = !lto
-source = git+https://github.com/microsoft/onnxruntime#tag=v1.11.1
+source = git+https://github.com/microsoft/onnxruntime#tag=v1.12.0
 source = git+https://github.com/onnx/onnx.git
 source = git+https://github.com/dcleblanc/SafeInt.git
 source = git+https://github.com/tensorflow/tensorboard.git
@@ -56,8 +56,8 @@ pkgbase = python-onnxruntime
 sha512sums = SKIP
 sha512sums = SKIP
 sha512sums = SKIP
-sha512sums = 80ea85ea20bbbdec7991f965a66b627a5f42828bc0c72be0913078d927833a82402fb1af6c5c9f6ecae861b45582fa42c98ce83b02768e4bf875ab89dd1c607c
-sha512sums = 06a002361cc324184d0bfcb520b472f57749c0537329f0e0dee833cc7fce2f08b14590b77bc0211422dfb933dbef6f249f19939f9e0df465c48ee8fc7827e31c
+sha512sums = ab0446ede08e528ca631a73e536ff42009ee8f152972d37050b2f9b44b3d1c06d19bd8a91c31b09c26f5db1482a699b8fe2c221b78199199dfa245728856b196
+sha512sums = 7d55b0d4232183a81c20a5049f259872150536eed799d81a15e7f10b5c8b5279b443ba96d7b97c0e4338e95fc18c9d6f088e348fc7002256ee7170d25b27d80d
 sha512sums = 6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08

 pkgname = python-onnxruntime
12 changes: 5 additions & 7 deletions PKGBUILD
@@ -6,28 +6,26 @@ pkgbase=python-onnxruntime
 # Not split DNNL EP to another package as it's needed unconditionally at runtime if built at compile time
 # https://github.com/microsoft/onnxruntime/blob/v1.9.1/onnxruntime/python/onnxruntime_pybind_state.cc#L533
 pkgname=(python-onnxruntime)
-pkgver=1.11.1
+pkgver=1.12.0
 pkgdesc='Cross-platform, high performance scoring engine for ML models'
 pkgrel=1
 arch=(x86_64)
 url='https://github.com/microsoft/onnxruntime'
 license=(MIT)
-depends=(nsync re2 python-flatbuffers python-numpy python-protobuf openmpi onednn libprotobuf-lite.so)
+depends=(nsync re2 openmpi onednn libprotobuf-lite.so
+python-coloredlogs python-flatbuffers python-numpy python-packaging python-protobuf python-sympy)
 makedepends=(git cmake pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers)
 optdepends=(
 # https://github.com/microsoft/onnxruntime/pull/9969
 'python-onnx: for the backend API, quantization, orttraining, transformers and various tools'
-'python-coloredlogs: for transformers' # also used by TensorRT tools, but we don't build for it, anyway
 'python-psutil: for transformers'
 'python-py-cpuinfo: for transformers'
 'python-py3nvml: for transformers'
-'python-packaging: for transformers and various tools'
 'python-transformers: for transformers'
 'python-scipy: for transformers and various tools'
 'python-pytorch: for transformers, orttraining and various tools'
 'python-cerberus: for orttraining'
 'python-h5py: for orttraining'
-'python-sympy: for transformers and various tools'
 )
 # not de-vendored libraries
 # onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030)
@@ -48,8 +46,8 @@ sha512sums=('SKIP'
 'SKIP'
 'SKIP'
 'SKIP'
-'80ea85ea20bbbdec7991f965a66b627a5f42828bc0c72be0913078d927833a82402fb1af6c5c9f6ecae861b45582fa42c98ce83b02768e4bf875ab89dd1c607c'
-'06a002361cc324184d0bfcb520b472f57749c0537329f0e0dee833cc7fce2f08b14590b77bc0211422dfb933dbef6f249f19939f9e0df465c48ee8fc7827e31c'
+'ab0446ede08e528ca631a73e536ff42009ee8f152972d37050b2f9b44b3d1c06d19bd8a91c31b09c26f5db1482a699b8fe2c221b78199199dfa245728856b196'
+'7d55b0d4232183a81c20a5049f259872150536eed799d81a15e7f10b5c8b5279b443ba96d7b97c0e4338e95fc18c9d6f088e348fc7002256ee7170d25b27d80d'
 '6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08')
 # CUDA seems not working with LTO
 options+=('!lto')
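Not from the commit: a short post-install smoke test for the rebuilt package, assuming it is installed system-wide. onnxruntime.__version__ and get_available_providers() are standard onnxruntime API; the provider list in the comment is only what one might expect from a build with oneDNN enabled.

# Hypothetical sanity check after updating to 1.12.0
import onnxruntime as ort

print(ort.__version__)                # expected: 1.12.0
print(ort.get_available_providers())  # e.g. ['DnnlExecutionProvider', 'CPUExecutionProvider']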
46 changes: 27 additions & 19 deletions build-fixes.patch
@@ -2,38 +2,46 @@ diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index a027c69e0..eb7608518 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -841,7 +841,9 @@ add_library(safeint_interface INTERFACE)
target_include_directories(safeint_interface INTERFACE ${SAFEINT_INCLUDE_DIR})
@@ -733,7 +733,7 @@

if (onnxruntime_PREFER_SYSTEM_LIB)
- find_package(boost_mp11)
+ # boost on Arch does not support find_package(boost_mp11)
+ add_library(boost_mp11 INTERFACE)
+ add_library(Boost::mp11 ALIAS boost_mp11)
if (NOT WIN32)
if (onnxruntime_PREFER_SYSTEM_LIB)
- find_package(nsync)
+ find_package(nsync_cpp)
endif()
if (TARGET nsync_cpp) # linking error with nsync_FOUND (why?)
message("Use nsync from preinstalled system lib")
@@ -764,9 +765,11 @@ if(onnxruntime_DISABLE_EXCEPTIONS)
endif()
if (NOT TARGET Boost::mp11)
add_subdirectory(external/mp11 EXCLUDE_FROM_ALL)
@@ -764,10 +765,12 @@ if(onnxruntime_DISABLE_EXCEPTIONS)

set(JSON_BuildTests OFF CACHE INTERNAL "")
set(JSON_Install OFF CACHE INTERNAL "")
-add_subdirectory(external/json EXCLUDE_FROM_ALL)
+find_package(nlohmann_json REQUIRED)

if (onnxruntime_PREFER_SYSTEM_LIB)
- find_package(re2)
+ find_package(PkgConfig)
+ pkg_check_modules(RE2 IMPORTED_TARGET re2)
+ add_library(re2::re2 ALIAS PkgConfig::RE2)
endif()
if (NOT TARGET re2::re2)
-if (re2_FOUND)
+if (TARGET re2::re2)
message("Use re2 from preinstalled system lib")
else()
add_subdirectory(external/re2 EXCLUDE_FROM_ALL)
@@ -1421,7 +1421,7 @@
find_package(Flatbuffers)
endif()
if (Flatbuffers_FOUND)
message("Use flatbuffers from preinstalled system lib")
- add_library(flatbuffers ALIAS flatbuffers::flatbuffers)
+ add_library(flatbuffers ALIAS flatbuffers::flatbuffers_shared)
else()
add_subdirectory(external/flatbuffers EXCLUDE_FROM_ALL)
endif()
message("Use flatbuffers from submodule")
# We do not need to build flatc for iOS or Android Cross Compile
--- a/setup.py 2022-07-22 17:00:19.638893453 +0800
+++ b/setup.py 2022-07-22 17:02:00.686317628 +0800
@@ -16,7 +16,7 @@

from setuptools import Extension, setup
from setuptools.command.install import install as InstallCommandBase
-from wheel.vendored.packaging.tags import sys_tags
+from packaging.tags import sys_tags

nightly_build = False
package_name = "onnxruntime"
16 changes: 8 additions & 8 deletions install-orttraining-files.diff
@@ -1,18 +1,18 @@
--- a/setup.py 2021-12-29 22:44:09.924917943 +0800
+++ b/setup.py 2021-12-29 22:49:16.216878004 +0800
@@ -355,7 +355,7 @@
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS'])
if not enable_training:
classifiers.extend(["Operating System :: Microsoft :: Windows", "Operating System :: MacOS"])

-if enable_training:
+if True:
packages.extend(['onnxruntime.training',
'onnxruntime.training.amp',
'onnxruntime.training.optim',
packages.extend(
[
"onnxruntime.training",
@@ -373,6 +373,7 @@
package_data['onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator'] = ['*.cc']
package_data['onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops'] = \
['*.cpp', '*.cu', '*.cuh', '*.h']
'*.cuh',
'*.h'
]
+if enable_training:
requirements_file = "requirements-training.txt"
# with training, we want to follow this naming convention:
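To summarize the intent of the patch above: the onnxruntime.training subpackages are always shipped in the wheel, while requirements-training.txt is still selected only for actual training builds. A simplified sketch of that logic, assuming an Arch build without orttraining enabled (names follow the patched setup.py; this is illustrative, not the real file):

# Simplified sketch of the patched setup.py behaviour (illustrative only)
enable_training = False  # assumption: Arch builds without orttraining

packages = ["onnxruntime"]
# Patched: training subpackages are packaged unconditionally ("if True:")
packages.extend(["onnxruntime.training", "onnxruntime.training.amp", "onnxruntime.training.optim"])

# Unchanged: the training requirements file remains gated on enable_training
requirements_file = "requirements-training.txt" if enable_training else "requirements.txt"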
