diff --git a/recipes/onnxruntime/all/CMakeLists.txt b/recipes/onnxruntime/all/CMakeLists.txt
new file mode 100644
index 00000000000000..1848ca5a77c355
--- /dev/null
+++ b/recipes/onnxruntime/all/CMakeLists.txt
@@ -0,0 +1,7 @@
+cmake_minimum_required(VERSION 2.8.12)
+project(cmake_wrapper)
+
+include(conanbuildinfo.cmake)
+conan_basic_setup()
+
+add_subdirectory("source_subfolder")
diff --git a/recipes/onnxruntime/all/conandata.yml b/recipes/onnxruntime/all/conandata.yml
new file mode 100644
index 00000000000000..28ba0fd5e36dcd
--- /dev/null
+++ b/recipes/onnxruntime/all/conandata.yml
@@ -0,0 +1,14 @@
+sources:
+  "1.7.1":
+    sha256: e24ff9a2a21dab74e6dfa821e9a267baa10e9423ffae5f5e404e3a2949eb41e2
+    url: https://github.com/microsoft/onnxruntime/archive/v1.7.1.tar.gz
+  "1.5.3":
+    sha256: 40fc79f8a3126caba5fcdced260439c7b18c72214fb8f7268d87fac22098dfba
+    url: https://github.com/microsoft/onnxruntime/archive/v1.5.3.tar.gz
+  "1.2.0":
+    sha256: 1ed2a4303d621682c42ebaf99fb9a8ecdc386035b15925b148aa56944abc38c8
+    url: https://github.com/microsoft/onnxruntime/archive/v1.2.0.tar.gz
+patches:
+  "1.7.1": []
+  "1.5.3": []
+  "1.2.0": []
diff --git a/recipes/onnxruntime/all/conanfile.py b/recipes/onnxruntime/all/conanfile.py
new file mode 100644
index 00000000000000..bbe3cbbc5069cd
--- /dev/null
+++ b/recipes/onnxruntime/all/conanfile.py
@@ -0,0 +1,227 @@
+import os
+from conans import ConanFile, tools
+
+
+class OnnxRuntimeConan(ConanFile):
+    name = "onnxruntime"
+    description = "ONNX Runtime: cross-platform, high performance scoring " \
+                  "engine for ML models https://aka.ms/onnxruntime"
+    license = "MIT"
+    topics = (
+        "deep-learning",
+        "onnx",
+        "neural-networks",
+        "machine-learning",
+        "ai-framework",
+        "hardware-acceleration"
+    )
+    homepage = "https://www.onnxruntime.ai/"
+    url = "https://github.com/conan-io/conan-center-index"
+
+    settings = "os", "compiler", "build_type", "arch", "os_build"
+
+    options = {
+        "shared": [True, False],
+        "parallel": [True, False],
+        "force_min_size_rel": [True, False],
+        "with_dml": [True, False],
+        "with_openmp": [True, False],
+        "with_dnnl": [True, False],
+        "with_nnapi": [True, False],
+        "with_winml": [True, False],
+        "with_python_wheel": [True, False],
+        "with_csharp": [True, False],
+        "with_java": [True, False],
+        "with_tests": [True, False]
+    }
+
+    default_options = {k: False for k in options}
+
+    @property
+    def _source_subfolder(self):
+        return "source_subfolder"
+
+    def build_requirements(self):
+        self.build_requires("protobuf/3.11.3")
+
+    def source(self):
+        # Unfortunately, the GitHub release tarballs do not include the git
+        # submodules, so the usual approach below does not work:
+        #
+        #   tools.get(**self.conan_data["sources"][self.version])
+        #   os.rename(f"onnxruntime-{self.version}", self._source_subfolder)
+        #
+        # Clone the repository with tools.Git instead:
+        git = tools.Git(folder=self._source_subfolder)
+        git.clone("https://github.com/microsoft/onnxruntime.git")
+        git.checkout(f"v{self.version}", submodule="recursive")
+
+    def build(self):
+        is_windows = self.settings.os_build == "Windows"
+        build_script = ".\\build.bat" if is_windows else "./build.sh"
+        build_args = ["--skip_submodule_sync"]
+
+        if self.options.force_min_size_rel:
+            build_args.extend(["--config", "MinSizeRel"])
+        elif self.settings.build_type is not None:
+            build_args.extend(["--config", str(self.settings.build_type)])
+
+        if self.options.shared:
+            build_args.append("--build_shared_lib")
+
+        if self.options.parallel:
+            build_args.append("--parallel")
+
+        if self.options.with_openmp:
+            build_args.append("--use_openmp")
+
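+        # Each with_* block below forwards the matching flag to
+        # onnxruntime's build script; an unset option simply omits its flag.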
+        if self.options.with_dnnl:
+            build_args.append("--use_dnnl")
+
+        if self.options.with_nnapi:
+            build_args.append("--use_nnapi")
+
+        if self.options.with_winml:
+            build_args.append("--use_winml")
+
+        if self.options.with_dml:
+            build_args.append("--use_dml")
+
+        if self.options.with_python_wheel:
+            build_args.append("--build_wheel")
+
+        if self.options.with_csharp:
+            build_args.append("--build_csharp")
+
+        if self.options.with_java:
+            build_args.append("--use_java")
+
+        if self.options.with_tests:
+            build_args.append("--tests")
+        else:
+            build_args.append("--skip_tests")
+
+        build_args.extend([
+            "--cmake_extra_defines",
+            f"CMAKE_INSTALL_PREFIX={self.package_folder}"
+        ])
+
+        # TODO: full list of options in
+        # https://github.com/microsoft/onnxruntime/blob/master/BUILD.md
+
+        if self.settings.os == "Android":
+            build_args.append("--android")
+
+            android_ndk_root = None
+            android_sdk_root = None
+            if "ANDROID_NDK_ROOT" in os.environ:
+                android_ndk_root = os.environ.get("ANDROID_NDK_ROOT")
+            elif "ANDROID_NDK_HOME" in os.environ:
+                android_ndk_root = os.environ.get("ANDROID_NDK_HOME")
+
+            if "ANDROID_SDK_ROOT" in os.environ:
+                android_sdk_root = os.environ.get("ANDROID_SDK_ROOT")
+            elif "ANDROID_SDK_HOME" in os.environ:
+                android_sdk_root = os.environ.get("ANDROID_SDK_HOME")
+            elif "ANDROID_HOME" in os.environ:
+                android_sdk_root = os.environ.get("ANDROID_HOME")
+
+            # Fall back to the ndk-bundle shipped inside the SDK, if present.
+            if android_ndk_root is None and android_sdk_root is not None:
+                ndk_bundle_path = os.path.join(android_sdk_root, "ndk-bundle")
+                if os.path.isdir(ndk_bundle_path):
+                    android_ndk_root = ndk_bundle_path
+
+            if android_sdk_root is None:
+                raise Exception("ANDROID_SDK_ROOT env not defined")
+            if android_ndk_root is None:
+                raise Exception("ANDROID_NDK_ROOT env not defined")
+
+            build_args.extend(["--android_sdk_path", android_sdk_root])
+            build_args.extend(["--android_ndk_path", android_ndk_root])
+            build_args.extend([
+                "--android_abi",
+                tools.to_android_abi(self.settings.arch)
+            ])
+
+            if self.settings.os.api_level:
+                build_args.extend([
+                    "--android_api", str(self.settings.os.api_level)
+                ])
+
+        with tools.chdir(self._source_subfolder):
+            self.run(" ".join([build_script] + build_args))
+
+    def package(self):
+        build_dir = self._onnxruntime_build_dir
+        self.run(f"cmake --build {build_dir} --target install")
+
+        if not self.options.shared:
+            self.copy(pattern="*.a", dst="lib", src=build_dir, keep_path=False)
+
+        self.copy(
+            "onnxruntime_perf_test",
+            dst="bin",
+            src=build_dir,
+            keep_path=False
+        )
+
+        providers_inc = "include/onnxruntime/core/providers"
+        for provider in ["nnapi", "dnnl"]:
+            if getattr(self.options, f"with_{provider}"):
+                dst = os.path.join(providers_inc, provider)
+                self.copy(
+                    pattern="*.h",
+                    dst=dst,
+                    src=os.path.join(self._source_subfolder, dst)
+                )
+
+    def package_info(self):
+        if self.options.shared:
+            self.cpp_info.libs = ["onnxruntime"]
+        else:
+            debug_suffix = "d" if self.settings.build_type == "Debug" else ""
+
+            onnxruntime_libs = []
+
+            if self.options.with_nnapi:
+                onnxruntime_libs.append("providers_nnapi")
+
+            onnxruntime_libs.extend([
+                "session",
+                "optimizer",
+                "providers",
+                "framework",
+                "graph",
+                "util",
+                "mlas",
+                "common",
+                "flatbuffers",
+            ])
+
+            self.cpp_info.libs = \
+                [f"onnxruntime_{lib}" for lib in onnxruntime_libs]
+
+            self.cpp_info.libs.extend([
+                "onnx",
+                "onnx_proto",
+                "flatbuffers",
+                "re2",
+                "nsync_cpp",
+                "protobuf-lite" + debug_suffix,
+            ])
+
+            if self.options.with_dnnl:
+                self.cpp_info.libs.append("dnnl")
+
+        self.cpp_info.includedirs.append("include/onnxruntime/core/session")
+
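+    # build.sh/build.bat nest their CMake output under
+    # <source>/build/<OS>/<config>; the helper below mirrors that layout.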
+    @property
+    def _onnxruntime_build_dir(self):
+        msr = self.options.force_min_size_rel
+        build_type = "MinSizeRel" if msr else str(self.settings.build_type)
+        return os.path.join(
+            self.build_folder,
+            self._source_subfolder,
+            "build",
+            str(self.settings.os),
+            build_type
+        )
diff --git a/recipes/onnxruntime/all/test_package/CMakeLists.txt b/recipes/onnxruntime/all/test_package/CMakeLists.txt
new file mode 100644
index 00000000000000..01078624049d16
--- /dev/null
+++ b/recipes/onnxruntime/all/test_package/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.1)
+project(test_package)
+
+include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
+conan_basic_setup(TARGETS)
+
+find_package(OnnxRuntime REQUIRED)
+
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+add_executable(${PROJECT_NAME} test_package.cpp)
+target_link_libraries(${PROJECT_NAME} ${OnnxRuntime_LIBRARIES})
+target_include_directories(${PROJECT_NAME} PRIVATE ${OnnxRuntime_INCLUDE_DIRS})
diff --git a/recipes/onnxruntime/all/test_package/conanfile.py b/recipes/onnxruntime/all/test_package/conanfile.py
new file mode 100644
index 00000000000000..20a73d2658fc51
--- /dev/null
+++ b/recipes/onnxruntime/all/test_package/conanfile.py
@@ -0,0 +1,23 @@
+from conans import ConanFile, CMake, tools
+import os
+
+
+class OnnxRuntimeTestConan(ConanFile):
+    settings = "os", "compiler", "build_type", "arch"
+    generators = "cmake", "cmake_find_package"
+
+    def build(self):
+        cmake = CMake(self)
+        cmake.configure()
+        cmake.build()
+
+    def test(self):
+        tools.download(
+            "https://github.com/microsoft/onnxruntime/raw"
+            "/master/csharp/testdata/squeezenet.onnx",
+            "squeezenet.onnx"
+        )
+
+        if not tools.cross_building(self.settings):
+            bin_path = os.path.join("bin", "test_package")
+            self.run(bin_path, run_environment=True)
diff --git a/recipes/onnxruntime/all/test_package/test_package.cpp b/recipes/onnxruntime/all/test_package/test_package.cpp
new file mode 100644
index 00000000000000..aefed9b0a6b0e8
--- /dev/null
+++ b/recipes/onnxruntime/all/test_package/test_package.cpp
@@ -0,0 +1,132 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+//
+// Source: https://github.com/microsoft/onnxruntime/blob/master/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests.Capi/CXX_Api_Sample.cpp
+
+#include <assert.h>
+#include <stdio.h>
+#include <cmath>
+#include <vector>
+
+#include <onnxruntime_cxx_api.h>
+
+int main(int argc, char* argv[]) {
+  //*************************************************************************
+  // initialize environment...one environment per process
+  // environment maintains thread pools and other state info
+  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
+
+  // initialize session options if needed
+  Ort::SessionOptions session_options;
+  session_options.SetIntraOpNumThreads(1);
+
+  // If onnxruntime.dll is built with CUDA enabled, we can uncomment this line to use CUDA for this
+  // session (we also need to include cuda_provider_factory.h above, which defines it):
+  // #include "cuda_provider_factory.h"
+  // OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 1);
+
+  // Sets graph optimization level
+  // Available levels are
+  // ORT_DISABLE_ALL     -> To disable all optimizations
+  // ORT_ENABLE_BASIC    -> To enable basic optimizations (such as redundant node removals)
+  // ORT_ENABLE_EXTENDED -> To enable extended optimizations (includes level 1 + more complex optimizations like node fusions)
+  // ORT_ENABLE_ALL      -> To enable all possible optimizations
+  session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
+
+  //*************************************************************************
+  // create session and load model into memory
+  // using squeezenet version 1.3
+  // URL = https://github.com/onnx/models/tree/master/squeezenet
+#ifdef _WIN32
+  const wchar_t* model_path = L"squeezenet.onnx";
+#else
+  const char* model_path = "squeezenet.onnx";
+#endif
+
+  printf("Using Onnxruntime C++ API\n");
+  Ort::Session session(env, model_path, session_options);
+
+  //*************************************************************************
+  // print model input layer (node names, types, shape etc.)
+  Ort::AllocatorWithDefaultOptions allocator;
+
+  // print number of model input nodes
+  size_t num_input_nodes = session.GetInputCount();
+  std::vector<const char*> input_node_names(num_input_nodes);
+  std::vector<int64_t> input_node_dims;  // simplify... this model has only 1 input node {1, 3, 224, 224}.
+                                         // Otherwise need vector<vector<>>
+
+  printf("Number of inputs = %zu\n", num_input_nodes);
+
+  // iterate over all input nodes
+  for (int i = 0; i < num_input_nodes; i++) {
+    // print input node names
+    char* input_name = session.GetInputName(i, allocator);
+    printf("Input %d : name=%s\n", i, input_name);
+    input_node_names[i] = input_name;
+
+    // print input node types
+    Ort::TypeInfo type_info = session.GetInputTypeInfo(i);
+    auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
+
+    ONNXTensorElementDataType type = tensor_info.GetElementType();
+    printf("Input %d : type=%d\n", i, type);
+
+    // print input shapes/dims
+    input_node_dims = tensor_info.GetShape();
+    printf("Input %d : num_dims=%zu\n", i, input_node_dims.size());
+    for (int j = 0; j < input_node_dims.size(); j++)
+      printf("Input %d : dim %d=%lld\n", i, j, input_node_dims[j]);
+  }
+
+  // Results should be...
+  // Number of inputs = 1
+  // Input 0 : name = data_0
+  // Input 0 : type = 1
+  // Input 0 : num_dims = 4
+  // Input 0 : dim 0 = 1
+  // Input 0 : dim 1 = 3
+  // Input 0 : dim 2 = 224
+  // Input 0 : dim 3 = 224
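+
+  // Note: the name strings returned by GetInputName() are allocated via
+  // 'allocator' and never released in this short-lived sample; a
+  // long-running application should free them (e.g. with allocator.Free()).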
+
+  //*************************************************************************
+  // Similar operations to get output node information.
+  // Use OrtSessionGetOutputCount(), OrtSessionGetOutputName()
+  // OrtSessionGetOutputTypeInfo() as shown above.
+
+  //*************************************************************************
+  // Score the model using sample data, and inspect values
+
+  size_t input_tensor_size = 224 * 224 * 3;  // simplify ... using known dim values to calculate size
+                                             // use OrtGetTensorShapeElementCount() to get official size!
+
+  std::vector<float> input_tensor_values(input_tensor_size);
+  std::vector<const char*> output_node_names = {"softmaxout_1"};
+
+  // initialize input data with values in [0.0, 1.0]
+  for (unsigned int i = 0; i < input_tensor_size; i++)
+    input_tensor_values[i] = (float)i / (input_tensor_size + 1);
+
+  // create input tensor object from data values
+  auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+  Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), 4);
+  assert(input_tensor.IsTensor());
+
+  // score model & input tensor, get back output tensor
+  auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, 1, output_node_names.data(), 1);
+  assert(output_tensors.size() == 1 && output_tensors.front().IsTensor());
+
+  // Get pointer to output tensor float values
+  float* floatarr = output_tensors.front().GetTensorMutableData<float>();
+  assert(std::abs(floatarr[0] - 0.000045) < 1e-6);
+
+  // score the model, and print scores for first 5 classes
+  for (int i = 0; i < 5; i++)
+    printf("Score for class [%d] = %f\n", i, floatarr[i]);
+
+  // Results should be as below...
+  // Score for class[0] = 0.000045
+  // Score for class[1] = 0.003846
+  // Score for class[2] = 0.000125
+  // Score for class[3] = 0.001180
+  // Score for class[4] = 0.001317
+  printf("Done!\n");
+  return 0;
+}
diff --git a/recipes/onnxruntime/config.yml b/recipes/onnxruntime/config.yml
new file mode 100644
index 00000000000000..4c7c778dfa5404
--- /dev/null
+++ b/recipes/onnxruntime/config.yml
@@ -0,0 +1,7 @@
+versions:
+  "1.7.1":
+    folder: all
+  "1.5.3":
+    folder: all
+  "1.2.0":
+    folder: all