From 2df7b7d746c00f3871b1e4545aedbcc79252a229 Mon Sep 17 00:00:00 2001 From: Bartek Szmelczynski Date: Thu, 21 Oct 2021 16:04:24 +0200 Subject: [PATCH 01/46] Bind exec core ov (#50) --- .../bindings/python/src/openvino/__init__.py | 1 + .../bindings/python/src/openvino/ie_api.py | 6 + .../python/src/openvino/impl/__init__.py | 1 + .../python/src/pyopenvino/core/core.cpp | 114 +++++++ .../pyopenvino/core/{ie_core.hpp => core.hpp} | 0 .../pyopenvino/core/executable_network.cpp | 76 ++++++ ...ble_network.hpp => executable_network.hpp} | 0 .../python/src/pyopenvino/core/ie_core.cpp | 160 ------------ .../pyopenvino/core/ie_executable_network.cpp | 91 ------- .../src/pyopenvino/core/ie_infer_request.cpp | 2 +- .../python/src/pyopenvino/core/ie_version.cpp | 21 +- .../src/pyopenvino/graph/node_output.cpp | 67 +---- .../src/pyopenvino/graph/node_output.hpp | 75 +++++- .../python/src/pyopenvino/pyopenvino.cpp | 7 +- runtime/bindings/python/tests/__init__.py | 1 + runtime/bindings/python/tests/runtime.py | 44 +--- .../tests/test_inference_engine/test_core.py | 121 ++++----- .../test_executable_network.py | 229 ++++++++++++++++++ .../test_infer_request.py | 34 +-- .../test_onnx/test_onnx_external_data.py | 6 +- .../tests/test_onnx/test_onnx_import.py | 6 +- .../tests/test_onnx/utils/onnx_helpers.py | 8 +- 22 files changed, 600 insertions(+), 470 deletions(-) create mode 100644 runtime/bindings/python/src/pyopenvino/core/core.cpp rename runtime/bindings/python/src/pyopenvino/core/{ie_core.hpp => core.hpp} (100%) create mode 100644 runtime/bindings/python/src/pyopenvino/core/executable_network.cpp rename runtime/bindings/python/src/pyopenvino/core/{ie_executable_network.hpp => executable_network.hpp} (100%) delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_core.cpp delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp create mode 100644 runtime/bindings/python/tests/test_inference_engine/test_executable_network.py diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index 0334300387aa83..9ece8649f652e2 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -18,6 +18,7 @@ from openvino.ie_api import async_infer from openvino.ie_api import get_result from openvino.ie_api import blob_from_file +from openvino.ie_api import tensor_from_file from openvino.impl import Dimension from openvino.impl import Function diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index 925bc2ad5bb545..29588032458062 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -15,6 +15,7 @@ from openvino.pyopenvino import TBlobUint8 from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import InferRequest +from openvino.pyopenvino import Tensor precision_map = {"FP32": np.float32, @@ -112,3 +113,8 @@ def blob_from_file(path_to_bin_file: str) -> BlobWrapper: array = np.fromfile(path_to_bin_file, dtype=np.uint8) tensor_desc = TensorDesc("U8", array.shape, "C") return BlobWrapper(tensor_desc, array) + +# flake8: noqa: D102 +def tensor_from_file(path: str) -> Tensor: + """The data will be read with dtype of uint8.""" + return Tensor(np.fromfile(path, dtype=np.uint8)) diff --git a/runtime/bindings/python/src/openvino/impl/__init__.py b/runtime/bindings/python/src/openvino/impl/__init__.py index 641764122dc5a9..cfde3f4b3e1d66 100644 
--- a/runtime/bindings/python/src/openvino/impl/__init__.py +++ b/runtime/bindings/python/src/openvino/impl/__init__.py @@ -49,4 +49,5 @@ from openvino.pyopenvino import Coordinate from openvino.pyopenvino import Output from openvino.pyopenvino import Layout +from openvino.pyopenvino import ConstOutput from openvino.pyopenvino import util diff --git a/runtime/bindings/python/src/pyopenvino/core/core.cpp b/runtime/bindings/python/src/pyopenvino/core/core.cpp new file mode 100644 index 00000000000000..35c7e54c57c0c5 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/core.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/core.hpp" + +#include +#include + +#include +#include + +#include "common.hpp" + +namespace py = pybind11; + +using ConfigMap = std::map; + +std::string to_string(py::handle handle) { + auto encodedString = PyUnicode_AsUTF8String(handle.ptr()); + return PyBytes_AsString(encodedString); +} + +void regclass_Core(py::module m) { + py::class_> cls(m, "Core"); + cls.def(py::init(), py::arg("xml_config_file") = ""); + + cls.def("set_config", + (void (ov::runtime::Core::*)(const ConfigMap&, const std::string&)) & ov::runtime::Core::set_config, + py::arg("config"), + py::arg("device_name") = ""); + + cls.def( + "compile_model", + (ov::runtime::ExecutableNetwork( + ov::runtime::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::compile_model, + py::arg("network"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def( + "add_extension", + [](ov::runtime::Core& self, const std::string& extension_path) { + auto extension_ptr = InferenceEngine::make_so_pointer(extension_path); + auto extension = std::dynamic_pointer_cast(extension_ptr); + self.add_extension(extension); + }, + py::arg("extension_path")); + + cls.def("get_versions", &ov::runtime::Core::get_versions); + + cls.def("read_model", + (std::shared_ptr(ov::runtime::Core::*)(const std::string&, const std::string&) const) & + ov::runtime::Core::read_model, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "read_model", + (std::shared_ptr(ov::runtime::Core::*)(const std::string&, const ov::runtime::Tensor&) const) & + ov::runtime::Core::read_model, + py::arg("model"), + py::arg("weights")); + + cls.def( + "read_model", + [](ov::runtime::Core& self, py::object model, py::object weights) { + return self.read_model(py::str(model), py::str(weights)); + }, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "import_model", + (ov::runtime::ExecutableNetwork(ov::runtime::Core::*)(std::istream&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::import_model, + py::arg("model_file"), + py::arg("device_name"), + py::arg("config") = py::none()); + + cls.def( + "get_config", + [](ov::runtime::Core& self, const std::string& device_name, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_config(device_name, name)); + }, + py::arg("device_name"), + py::arg("name")); + + cls.def( + "get_metric", + [](ov::runtime::Core& self, const std::string device_name, const std::string name) -> py::handle { + return Common::parse_parameter(self.get_metric(device_name, name)); + }, + py::arg("device_name"), + py::arg("name")); + + cls.def("register_plugin", &ov::runtime::Core::register_plugin, py::arg("plugin_name"), py::arg("device_name")); + + cls.def("register_plugins", &ov::runtime::Core::register_plugins, 
py::arg("xml_config_file")); + + cls.def("unload_plugin", &ov::runtime::Core::unload_plugin, py::arg("device_name")); + + cls.def( + "query_model", + (ov::runtime::SupportedOpsMap( + ov::runtime::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::query_model, + py::arg("model"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def_property_readonly("available_devices", &ov::runtime::Core::get_available_devices); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.hpp b/runtime/bindings/python/src/pyopenvino/core/core.hpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_core.hpp rename to runtime/bindings/python/src/pyopenvino/core/core.hpp diff --git a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp new file mode 100644 index 00000000000000..82817b68484f74 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp @@ -0,0 +1,76 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "openvino/runtime/executable_network.hpp" + +#include + +#include "common.hpp" +#include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/ie_infer_request.hpp" +#include "pyopenvino/core/ie_input_info.hpp" + +namespace py = pybind11; + +void regclass_ExecutableNetwork(py::module m) { + py::class_> cls( + m, + "ExecutableNetwork"); + + cls.def("create_infer_request", &ov::runtime::ExecutableNetwork::create_infer_request); + + // cls.def("infer_new_request", [](ov::runtime::ExecutableNetwork& self, const py::dict& inputs) { + // TODO: implment after https://github.com/openvinotoolkit/openvino/pull/7962 + // will be merged as a seperate ticket + // }); + + cls.def("export_model", &ov::runtime::ExecutableNetwork::export_model, py::arg("network_model")); + + cls.def( + "get_config", + [](ov::runtime::ExecutableNetwork& self, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_config(name)); + }, + py::arg("name")); + + cls.def( + "get_metric", + [](ov::runtime::ExecutableNetwork& self, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_metric(name)); + }, + py::arg("name")); + + cls.def("get_runtime_function", &ov::runtime::ExecutableNetwork::get_runtime_function); + + cls.def_property_readonly("inputs", &ov::runtime::ExecutableNetwork::inputs); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)() const) & + ov::runtime::ExecutableNetwork::input); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)(size_t) const) & + ov::runtime::ExecutableNetwork::input, + py::arg("i")); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)(const std::string&) const) & + ov::runtime::ExecutableNetwork::input, + py::arg("tensor_name")); + + cls.def_property_readonly("outputs", &ov::runtime::ExecutableNetwork::outputs); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)() const) & + ov::runtime::ExecutableNetwork::output); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)(size_t) const) & + ov::runtime::ExecutableNetwork::output, + py::arg("i")); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)(const std::string&) const) & + ov::runtime::ExecutableNetwork::output, + py::arg("tensor_name")); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp 
b/runtime/bindings/python/src/pyopenvino/core/executable_network.hpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp rename to runtime/bindings/python/src/pyopenvino/core/executable_network.hpp diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp deleted file mode 100644 index 9d37bfb00f7fe3..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyopenvino/core/ie_core.hpp" - -#include - -#include - -#include "common.hpp" - -namespace py = pybind11; - -std::string to_string(py::handle handle) { - auto encodedString = PyUnicode_AsUTF8String(handle.ptr()); - return PyBytes_AsString(encodedString); -} - -void regclass_Core(py::module m) { - py::class_> cls(m, "Core"); - cls.def(py::init()); - cls.def(py::init()); - - cls.def( - "set_config", - [](InferenceEngine::Core& self, const py::dict& config, const std::string& device_name) { - std::map config_map; - for (auto item : config) { - config_map[to_string(item.first)] = to_string(item.second); - } - self.SetConfig(config_map, device_name); - }, - py::arg("config"), - py::arg("device_name")); - - cls.def( - "load_network", - [](InferenceEngine::Core& self, - const InferenceEngine::CNNNetwork& network, - const std::string& device_name, - const std::map& config) { - return self.LoadNetwork(network, device_name, config); - }, - py::arg("network"), - py::arg("device_name"), - py::arg("config") = py::dict()); - - cls.def( - "add_extension", - [](InferenceEngine::Core& self, const std::string& extension_path, const std::string& device_name) { - auto extension_ptr = InferenceEngine::make_so_pointer(extension_path); - auto extension = std::dynamic_pointer_cast(extension_ptr); - self.AddExtension(extension, device_name); - }, - py::arg("extension_path"), - py::arg("device_name")); - - cls.def( - "get_versions", - [](InferenceEngine::Core& self, const std::string& device_name) { - return self.GetVersions(device_name); - }, - py::arg("device_name")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, py::bytes model, py::bytes weights) { - InferenceEngine::MemoryBlob::Ptr weights_blob; - if (weights) { - std::string weights_bytes = weights; - uint8_t* bin = (uint8_t*)weights_bytes.c_str(); - size_t bin_size = weights_bytes.length(); - InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, - {bin_size}, - InferenceEngine::Layout::C); - weights_blob = InferenceEngine::make_shared_blob(tensorDesc); - weights_blob->allocate(); - memcpy(weights_blob->rwmap().as(), bin, bin_size); - } - return self.ReadNetwork(model, weights_blob); - }, - py::arg("model"), - py::arg("weights")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, const std::string& model, const std::string& weights) { - return self.ReadNetwork(model, weights); - }, - py::arg("model"), - py::arg("weights") = ""); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, const std::string& model, py::handle blob) { - return self.ReadNetwork(model, Common::cast_to_blob(blob)); - }, - py::arg("model"), - py::arg("blob")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, py::object model, py::object weights) { - return self.ReadNetwork(py::str(model), py::str(weights)); - }, - py::arg("model"), - py::arg("weights") = ""); - - cls.def( - "import_network", - 
[](InferenceEngine::Core& self, - const std::string& model_file, - const std::string& device_name, - const std::map& config) { - return self.ImportNetwork(model_file, device_name, config); - }, - py::arg("model_file"), - py::arg("device_name"), - py::arg("config") = py::none()); - - cls.def( - "get_config", - [](InferenceEngine::Core& self, const std::string& device_name, const std::string& config_name) -> py::handle { - return Common::parse_parameter(self.GetConfig(device_name, config_name)); - }, - py::arg("device_name"), - py::arg("config_name")); - - cls.def( - "get_metric", - [](InferenceEngine::Core& self, std::string device_name, std::string metric_name) -> py::handle { - return Common::parse_parameter(self.GetMetric(device_name, metric_name)); - }, - py::arg("device_name"), - py::arg("metric_name")); - - cls.def("register_plugin", - &InferenceEngine::Core::RegisterPlugin, - py::arg("plugin_name"), - py::arg("device_name") = py::str()); - - cls.def("register_plugins", &InferenceEngine::Core::RegisterPlugins); - - cls.def("unregister_plugin", &InferenceEngine::Core::UnregisterPlugin, py::arg("device_name")); - - cls.def( - "query_network", - [](InferenceEngine::Core& self, - const InferenceEngine::CNNNetwork& network, - const std::string& device_name, - const std::map& config) { - return self.QueryNetwork(network, device_name, config).supportedLayersMap; - }, - py::arg("network"), - py::arg("device_name"), - py::arg("config") = py::dict()); - - cls.def_property_readonly("available_devices", &InferenceEngine::Core::GetAvailableDevices); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp deleted file mode 100644 index 37199110f09e68..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "pyopenvino/core/ie_executable_network.hpp" - -#include -#include - -#include "common.hpp" -#include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" -#include "pyopenvino/core/ie_input_info.hpp" - -namespace py = pybind11; - -void regclass_ExecutableNetwork(py::module m) { - py::class_> cls( - m, - "ExecutableNetwork"); - - cls.def("create_infer_request", [](InferenceEngine::ExecutableNetwork& self) { - auto request = InferRequestWrapper(self.CreateInferRequest()); - // Get Inputs and Outputs info from executable network - request._inputsInfo = self.GetInputsInfo(); - request._outputsInfo = self.GetOutputsInfo(); - // request.user_callback_defined = false; - return request; - }); - - cls.def( - "_infer", - [](InferenceEngine::ExecutableNetwork& self, const py::dict& inputs) { - // Create temporary InferRequest - auto request = self.CreateInferRequest(); - // Update inputs if there are any - if (!inputs.empty()) { - Common::set_request_blobs(request, inputs); //, self.GetInputsInfo()); - } - // Call Infer function - request.Infer(); - // Get output Blobs and return - Containers::PyResults results; - InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); - for (auto& out : outputsInfo) { - results[out.first] = request.GetBlob(out.first); - } - return results; - }, - py::arg("inputs")); - - cls.def("get_exec_graph_info", &InferenceEngine::ExecutableNetwork::GetExecGraphInfo); - - cls.def( - "export", - [](InferenceEngine::ExecutableNetwork& self, const std::string& modelFileName) { - 
self.Export(modelFileName); - }, - py::arg("model_file")); - - cls.def( - "get_config", - [](InferenceEngine::ExecutableNetwork& self, const std::string& config_name) -> py::handle { - return Common::parse_parameter(self.GetConfig(config_name)); - }, - py::arg("config_name")); - - cls.def( - "get_metric", - [](InferenceEngine::ExecutableNetwork& self, const std::string& metric_name) -> py::handle { - return Common::parse_parameter(self.GetMetric(metric_name)); - }, - py::arg("metric_name")); - - cls.def_property_readonly("input_info", [](InferenceEngine::ExecutableNetwork& self) { - Containers::PyConstInputsDataMap inputs; - const InferenceEngine::ConstInputsDataMap& inputsInfo = self.GetInputsInfo(); - for (const auto& in : inputsInfo) { - inputs[in.first] = in.second; - } - return inputs; - }); - - cls.def_property_readonly("output_info", [](InferenceEngine::ExecutableNetwork& self) { - Containers::PyOutputsDataMap outputs; - InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); - for (auto& out : outputsInfo) { - outputs[out.first] = out.second; - } - return outputs; - }); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp index f45ddd6a5cd73d..52d250f92ee7b7 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp @@ -10,7 +10,7 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_executable_network.hpp" +#include "pyopenvino/core/executable_network.hpp" #include "pyopenvino/core/ie_preprocess_info.hpp" namespace py = pybind11; diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp index 158cda68ceaaef..45b2b0ed6b30c9 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp @@ -2,24 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "pyopenvino/core/ie_version.hpp" +#include -#include +#include "openvino/core/version.hpp" namespace py = pybind11; void regclass_Version(py::module m) { - py::class_ cls(m, "Version"); + py::class_ cls(m, "Version"); - cls.def_readonly("build_number", &InferenceEngine::Version::buildNumber); - cls.def_readonly("description", &InferenceEngine::Version::description); - cls.def_readwrite("api_version", &InferenceEngine::Version::apiVersion); + cls.def_readonly("build_number", &ov::Version::buildNumber); + cls.def_readonly("description", &ov::Version::description); - cls.def_property_readonly("major", [](InferenceEngine::Version& self) { - return IE_VERSION_MAJOR; + cls.def_property_readonly("major", [](ov::Version& self) { + return OPENVINO_VERSION_MAJOR; }); - cls.def_property_readonly("minor", [](InferenceEngine::Version& self) { - return IE_VERSION_MINOR; + cls.def_property_readonly("minor", [](ov::Version& self) { + return OPENVINO_VERSION_MINOR; }); -} \ No newline at end of file +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp index 56ac60e3ba9f64..8d1cfcec5bb7d0 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp @@ -11,68 +11,5 @@ namespace py = pybind11; -void regclass_graph_Output(py::module m) { - py::class_, std::shared_ptr>> output(m, 
"Output", py::dynamic_attr()); - output.doc() = "openvino.impl.Output wraps ov::Output"; - - output.def("get_node", - &ov::Output::get_node, - R"( - Get node referenced by this output handle. - - Returns - ---------- - get_node : Node - Node object referenced by this output handle. - )"); - output.def("get_index", - &ov::Output::get_index, - R"( - The index of the output referred to by this output handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - output.def("get_element_type", - &ov::Output::get_element_type, - R"( - The element type of the output referred to by this output handle. - - Returns - ---------- - get_element_type : Type - Type of the output. - )"); - output.def("get_shape", - &ov::Output::get_shape, - R"( - The shape of the output referred to by this output handle. - - Returns - ---------- - get_shape : Shape - Shape of the output. - )"); - output.def("get_partial_shape", - &ov::Output::get_partial_shape, - R"( - The partial shape of the output referred to by this output handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the output. - )"); - output.def("get_target_inputs", - &ov::Output::get_target_inputs, - R"( - A set containing handles for all inputs targeted by the output - referenced by this output handle. - Returns - ---------- - get_target_inputs : Set[Input] - Set of Inputs. - )"); -} +template void regclass_graph_Output(py::module m, std::string typestring); +template void regclass_graph_Output(py::module m, std::string typestring); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp index 9934c628b2e098..a88722ebc18448 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp @@ -5,7 +5,80 @@ #pragma once #include +#include + +#include "openvino/core/node_output.hpp" namespace py = pybind11; -void regclass_graph_Output(py::module m); +template +void regclass_graph_Output(py::module m, std::string typestring) +{ + auto pyclass_name = py::detail::c_str((typestring + std::string("Output"))); + auto docs = py::detail::c_str((std::string("openvino.impl.") + typestring + std::string("Output wraps ov::Output<") + typestring + std::string(" ov::Node >"))); + py::class_, std::shared_ptr>> output(m, + pyclass_name, + py::dynamic_attr()); + output.doc() = docs; + + output.def("get_node", + &ov::Output::get_node, + R"( + Get node referenced by this output handle. + + Returns + ---------- + get_node : Node or const Node + Node object referenced by this output handle. + )"); + output.def("get_index", + &ov::Output::get_index, + R"( + The index of the output referred to by this output handle. + + Returns + ---------- + get_index : int + Index value as integer. + )"); + output.def("get_element_type", + &ov::Output::get_element_type, + R"( + The element type of the output referred to by this output handle. + + Returns + ---------- + get_element_type : Type + Type of the output. + )"); + output.def("get_shape", + &ov::Output::get_shape, + R"( + The shape of the output referred to by this output handle. + + Returns + ---------- + get_shape : Shape + Shape of the output. + )"); + output.def("get_partial_shape", + &ov::Output::get_partial_shape, + R"( + The partial shape of the output referred to by this output handle. + + Returns + ---------- + get_partial_shape : PartialShape + PartialShape of the output. 
+ )"); + output.def("get_target_inputs", + &ov::Output::get_target_inputs, + R"( + A set containing handles for all inputs targeted by the output + referenced by this output handle. + Returns + ---------- + get_target_inputs : Set[Input] + Set of Inputs. + )"); +} diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index 7611244a38b410..8384e753134ad9 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -21,10 +21,10 @@ # include "pyopenvino/graph/onnx_import/onnx_import.hpp" #endif #include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/core.hpp" +#include "pyopenvino/core/executable_network.hpp" #include "pyopenvino/core/ie_blob.hpp" -#include "pyopenvino/core/ie_core.hpp" #include "pyopenvino/core/ie_data.hpp" -#include "pyopenvino/core/ie_executable_network.hpp" #include "pyopenvino/core/ie_infer_queue.hpp" #include "pyopenvino/core/ie_infer_request.hpp" #include "pyopenvino/core/ie_input_info.hpp" @@ -92,7 +92,6 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_graph_PartialShape(m); regclass_graph_Node(m); regclass_graph_Input(m); - regclass_graph_Output(m); regclass_graph_NodeFactory(m); regclass_graph_Strides(m); regclass_graph_CoordinateDiff(m); @@ -113,6 +112,8 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_graph_Variant(m); regclass_graph_VariantWrapper(m, std::string("String")); regclass_graph_VariantWrapper(m, std::string("Int")); + regclass_graph_Output(m, std::string("")); + regclass_graph_Output(m, std::string("Const")); regclass_Core(m); regclass_IENetwork(m); diff --git a/runtime/bindings/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py index 2ed2779a0de11a..d6efec455e9e5f 100644 --- a/runtime/bindings/python/tests/__init__.py +++ b/runtime/bindings/python/tests/__init__.py @@ -146,3 +146,4 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support") xfail_issue_63643 = xfail_test(reason="RuntimeError: Unsupported operation of type: Convolution name") xfail_issue_54663 = xfail_test(reason="Disabled until MaxPool-8 is supported on CPU") +xfail_issue_68212 = xfail_test(reason="Unsupported reading model with bytes streams") diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index cc5c49620a6c5e..2b79cf3af5af50 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -32,22 +32,6 @@ def get_runtime(): return runtime() -def _convert_inputs(cnn_network: IENetwork) -> None: - """WA converts unsupported input images formats.""" - precision_map = { - "FP64": "FP32", - "I64": "I32", - "U32": "I32", - } - - for cnn_input in cnn_network.input_info: - try: - _precision = precision_map[cnn_network.input_info[cnn_input].precision] - cnn_network.input_info[cnn_input].precision = _precision - except KeyError: - pass - - def _convert_val(val): """WA converts unsupported input values.""" if type(val) is np.ndarray: @@ -60,18 +44,6 @@ def _convert_val(val): return np.array(val, dtype=np.float32) -def apply_ng_type(output: DataPtr, ng_type: Type): - ng_ie_supported_type_map = { - Type.boolean.get_type_name(): "BOOL", - Type.f32.get_type_name(): "FP32", - Type.i8.get_type_name(): "I8", - Type.i32.get_type_name(): "I32", - Type.u8.get_type_name(): "U8", - } - if ng_type.get_type_name() in ng_ie_supported_type_map: - output.precision = 
ng_ie_supported_type_map[ng_type.get_type_name()] - - class Runtime(object): """Represents an nGraph runtime environment.""" @@ -166,21 +138,15 @@ def __call__(self, *input_values: NumericData) -> List[NumericData]: param_names = [param.friendly_name for param in self.parameters] if self.network_cache.get(str(input_shapes)) is None: - cnn_network = IENetwork(self.function) + function = self.function if self.function.is_dynamic(): - cnn_network.reshape(dict(zip(param_names, input_shapes))) + function.reshape(dict(zip(param_names, input_shapes))) # Convert unsupported inputs of the network - _convert_inputs(cnn_network) - self.network_cache[str(input_shapes)] = cnn_network + self.network_cache[str(input_shapes)] = function else: - cnn_network = self.network_cache[str(input_shapes)] - - # set output blobs precission based on nG results - for ng_result in self.results: - ie_out_name = self._get_ie_output_blob_name(cnn_network.outputs, ng_result) - apply_ng_type(cnn_network.outputs[ie_out_name], ng_result.get_output_element_type(0)) + function = self.network_cache[str(input_shapes)] - executable_network = self.runtime.backend.load_network(cnn_network, self.runtime.backend_name) + executable_network = self.runtime.backend.compile_model(function, self.runtime.backend_name) for parameter, input in zip(self.parameters, input_values): parameter_shape = parameter.get_output_partial_shape(0) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index 59ed993278c8aa..7035d5e4dcea75 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -8,7 +8,7 @@ from pathlib import Path import openvino.opset8 as ov -from openvino import Core, IENetwork, ExecutableNetwork, blob_from_file +from openvino import Core, IENetwork, ExecutableNetwork, tensor_from_file from openvino.impl import Function, Shape, Type from openvino.impl.op import Parameter from openvino import TensorDesc, Blob @@ -51,7 +51,7 @@ def test_ie_core_class(): ie_core = Core() ie_core.set_config({}, device_name="CPU") - executable_network = ie_core.load_network(cnn_network, "CPU", {}) + executable_network = ie_core.compile_model(cnn_network, "CPU", {}) td = TensorDesc("FP32", input_shape, "NCHW") @@ -72,96 +72,80 @@ def test_ie_core_class(): assert np.allclose(result, expected_output) -def test_load_network(device): +def test_compile_model(device): ie = Core() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device) + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) assert isinstance(exec_net, ExecutableNetwork) -def test_read_network(): +def test_read_model(): ie_core = Core() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=test_net_xml, weights=test_net_bin) + assert isinstance(func, Function) - net = ie_core.read_network(model=test_net_xml) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=test_net_xml) + assert isinstance(func, Function) -def test_read_network_from_blob(): +def test_read_model_from_blob(): ie_core = Core() model = open(test_net_xml).read() - blob = blob_from_file(test_net_bin) - net = ie_core.read_network(model=model, blob=blob) - assert isinstance(net, IENetwork) + tensor = tensor_from_file(test_net_bin) + func = 
ie_core.read_model(model=model, weights=tensor) + assert isinstance(func, Function) -def test_read_network_from_blob_valid(): +def test_read_model_as_path(): ie_core = Core() - model = open(test_net_xml).read() - blob = blob_from_file(test_net_bin) - net = ie_core.read_network(model=model, blob=blob) - ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == o_net2.keys() - - -def test_read_network_as_path(): - ie_core = Core() - net = ie_core.read_network(model=Path(test_net_xml), weights=Path(test_net_bin)) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin)) + assert isinstance(func, Function) - net = ie_core.read_network(model=test_net_xml, weights=Path(test_net_bin)) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=test_net_xml, weights=Path(test_net_bin)) + assert isinstance(func, Function) - net = ie_core.read_network(model=Path(test_net_xml)) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=Path(test_net_xml)) + assert isinstance(func, Function) -def test_read_network_from_onnx(): +def test_read_model_from_onnx(): ie_core = Core() - net = ie_core.read_network(model=test_net_onnx) - assert isinstance(net, IENetwork) + func = ie_core.read_model(model=test_net_onnx) + assert isinstance(func, Function) -def test_read_network_from_onnx_as_path(): +def test_read_model_from_onnx_as_path(): ie_core = Core() - net = ie_core.read_network(model=Path(test_net_onnx)) - assert isinstance(net, IENetwork) - + func = ie_core.read_model(model=Path(test_net_onnx)) + assert isinstance(func, Function) +@pytest.mark.xfail(reason="68212") def test_read_net_from_buffer(): ie_core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - net = ie_core.read_network(model=xml, weights=bin) - assert isinstance(net, IENetwork) - + func = ie_core.read_model(model=xml, weights=bin) + assert isinstance(func, Function) +@pytest.mark.xfail(reason="68212") def test_net_from_buffer_valid(): ie_core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - net = ie_core.read_network(model=xml, weights=bin) - ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == o_net2.keys() + func = ie_core.read_model(model=xml, weights=bin) + ref_func = ie_core.read_model(model=test_net_xml, weights=test_net_bin) + assert func.name == ref_func.name + assert func.batch_size == ref_func.batch_size + ii_func = func.input_info + ii_func2 = ref_func.input_info + o_func = func.outputs + o_func2 = ref_func.outputs + assert ii_func.keys() == ii_func2.keys() + assert o_func.keys() == o_func2.keys() def test_get_version(device): @@ -230,15 +214,14 @@ def test_get_metric_str(): f"metric must be string but {type(param)} is returned"
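The renamed query_model call keeps the old query_network semantics: it maps each supported operation's friendly name to the device that will execute it (a short sketch; the "CPU" device name is an assumption):

from openvino import Core

core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
supported = core.query_model(model=func, device_name="CPU")
# One entry per operation of the function that the device supports.
print(set(supported.values()))

-def test_query_network(device): +def test_query_model(device): ie = Core() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - query_res = 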
ie.query_network(network=net, device_name=device) - func_net = net.get_function() - ops_net = func_net.get_ordered_ops() - ops_net_names = [op.friendly_name for op in ops_net] - assert [key for key in query_res.keys() if key not in ops_net_names] == [], \ - "Not all network layers present in query_network results" + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + query_res = ie.query_model(model=func, device_name=device) + ops_func = func.get_ordered_ops() + ops_func_names = [op.friendly_name for op in ops_func] + assert [key for key in query_res.keys() if key not in ops_func_names] == [], \ + "Not all network layers present in query_model results" assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers" @@ -246,8 +229,8 @@ def test_query_network(device): def test_register_plugin(): ie = Core() ie.register_plugin("MKLDNNPlugin", "BLA") - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, "BLA") + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, "BLA") assert isinstance(exec_net, ExecutableNetwork), \ "Cannot load the network to the registered plugin with name 'BLA'" @@ -262,8 +245,8 @@ def test_register_plugins(): elif platform == "win32": ie.register_plugins(plugins_win_xml) - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, "CUSTOM") + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, "CUSTOM") assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to " \ "the registered plugin with name 'CUSTOM' " \ diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py new file mode 100644 index 00000000000000..79ebaf7b5056c3 --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py @@ -0,0 +1,229 @@ +import numpy as np +import os +import pytest +import warnings +import time +from pathlib import Path + +from ..conftest import model_path, image_path +from openvino.impl import Function, ConstOutput, Shape, PartialShape + +from openvino import Core + +is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" +path_to_image = image_path() +test_net_xml, test_net_bin = model_path(is_myriad) + +def image_path(): + path_to_repo = os.environ["DATA_PATH"] + path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") + return path_to_img + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def read_image(): + import cv2 + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(path_to_img) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + +def test_get_runtime_function(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = 
ie.compile_model(func, device) + runtime_func = exec_net.get_runtime_function() + assert isinstance(runtime_func, Function) + + +def test_get_input_i(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = exec_net.input(0); + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_input_tensor_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = exec_net.input("data"); + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_input(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = exec_net.input(); + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_output_i(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + output = exec_net.output(0); + assert isinstance(output, ConstOutput) + + +def test_get_output(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + output = exec_net.output(); + output_node = output.get_node() + assert isinstance(output, ConstOutput) + + +def test_input_set_friendly_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = exec_net.input("data"); + input_node = input.get_node() + input_node.set_friendly_name("input_1") + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "input_1" + + +def test_output_set_friendly_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + output = exec_net.output(0); + output_node = output.get_node() + output_node.set_friendly_name("output_1") + name = output_node.friendly_name + assert isinstance(output, ConstOutput) + assert name == "output_1" + + +def test_outputs(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + outputs = exec_net.outputs + assert isinstance(outputs, list) + assert len(outputs) == 1 + + +def test_outputs_items(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + outputs = exec_net.outputs + assert isinstance(outputs[0], ConstOutput) + + +def test_output_type(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + output = exec_net.output(0) + output_type = output.get_element_type().get_type_name() + assert output_type == "f32" + +def test_output_shape(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + output = exec_net.output(0) + expected_shape = Shape([1, 10]) + assert str(output.get_shape()) == str(expected_shape) + + +def test_input_get_index(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = 
exec_net.input(0) + expected_idx = 0 + assert input.get_index() == expected_idx + + +def test_input_get_partial_shape(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + input = exec_net.input(0) + expected_partial_shape = PartialShape([1, 3, 32, 32]) + assert input.get_partial_shape() == expected_partial_shape + + +def test_inputs(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + inputs = exec_net.inputs + assert isinstance(inputs, list) + assert len(inputs) == 1 + + +def test_inputs_items(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + inputs = exec_net.inputs + assert isinstance(inputs[0], ConstOutput) + + +def test_inputs_get_friendly_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + node = input_0.get_node() + name = node.friendly_name + assert name == "data" + + +def test_inputs_set_friendly_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + node = input_0.get_node() + node.set_friendly_name("input_0") + name = node.friendly_name + assert name == "input_0" + + +def test_inputs_docs(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + expected_string = "openvino.impl.ConstOutput wraps ov::Output" + assert input_0.__doc__ == expected_string + diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 518a11cb37dead..50fbf1db853920 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -15,9 +15,9 @@ def test_get_perf_counts(device): ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) + func = ie_core.read_model(test_net_xml, test_net_bin) ie_core.set_config({"PERF_COUNT": "YES"}, device) - exec_net = ie_core.load_network(net, device) + exec_net = ie_core.compile_model(func, device) img = read_image() request = exec_net.create_infer_request() td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") @@ -39,10 +39,10 @@ def test_set_batch_size(device): ie_core = Core() ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device) - net = ie_core.read_network(test_net_xml, test_net_bin) - net.batch_size = 10 + func = ie_core.read_model(test_net_xml, test_net_bin) + func.batch_size = 10 data = np.ones(shape=net.input_info["data"].input_data.shape) - exec_net = ie_core.load_network(net, device) + exec_net = ie_core.compile_model(func, device) data[0] = read_image()[0] request = exec_net.create_infer_request() request.set_batch(1) @@ -60,38 +60,38 @@ def test_set_zero_batch_size(device): ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) + func = ie_core.read_model(test_net_xml, test_net_bin) + exec_net = ie_core.compile_model(func, device) request = 
exec_net.create_infer_request() with pytest.raises(ValueError) as e: request.set_batch(0) assert "Batch size should be positive integer number but 0 specified" in str(e.value) del exec_net del ie_core - del net + del func @pytest.mark.skip(reason="Fix") def test_set_negative_batch_size(device): ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) + func = ie_core.read_model(test_net_xml, test_net_bin) + exec_net = ie_core.compile_model(func, device) request = exec_net.create_infer_request() with pytest.raises(ValueError) as e: request.set_batch(-1) assert "Batch size should be positive integer number but -1 specified" in str(e.value) del exec_net del ie_core - del net + del func def test_blob_setter(device): ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net_1 = ie_core.load_network(network=net, device_name=device) + func = ie_core.read_model(test_net_xml, test_net_bin) + exec_net_1 = ie_core.compile_model(network=func, device_name=device) - net.input_info["data"].layout = "NHWC" - exec_net_2 = ie_core.load_network(network=net, device_name=device) + func.input_info["data"].layout = "NHWC" + exec_net_2 = ie_core.compile_model(network=func, device_name=device) img = read_image() @@ -114,8 +114,8 @@ def test_blob_setter(device): def test_cancel(device): ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) + func = ie_core.read_model(test_net_xml, test_net_bin) + exec_net = ie_core.compile_model(func, device) img = read_image() td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") input_blob = Blob(td, img) diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py index cbeb316c79ad6c..8fd4fb89ca582d 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py @@ -12,9 +12,7 @@ def test_import_onnx_with_external_data(): model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") ie = Core() - network = ie.read_network(model=model_path) - - ng_function = network.get_function() + func = ie.read_model(model=model_path) dtype = np.float32 value_a = np.array([1.0, 3.0, 5.0], dtype=dtype) @@ -22,6 +20,6 @@ def test_import_onnx_with_external_data(): # third input [5.0, 1.0, 3.0] read from external file runtime = get_runtime() - computation = runtime.computation(ng_function) + computation = runtime.computation(func) result = computation(value_a, value_b) assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype)) diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py index 2886ff592f3ef3..7c59a9462c989d 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py @@ -15,9 +15,7 @@ def test_import_onnx_function(): model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx") ie = Core() - network = ie.read_network(model=model_path) - - ng_function = network.get_function() + func = ie.read_model(model=model_path) dtype = np.float32 value_a = np.array([1.0], dtype=dtype) @@ -25,7 +23,7 @@ def test_import_onnx_function(): value_c = np.array([3.0], dtype=dtype) runtime = get_runtime() - computation = runtime.computation(ng_function) + computation = 
runtime.computation(func) result = computation(value_a, value_b, value_c) assert np.allclose(result, np.array([6], dtype=dtype)) diff --git a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py index 53c5487d31476f..7c7c7f2c7d8654 100644 --- a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py +++ b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py @@ -5,7 +5,7 @@ import onnx from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE -from openvino import Core, Blob, TensorDesc +from openvino import Core, Tensor from openvino.impl import Function @@ -21,9 +21,7 @@ def np_dtype_to_tensor_type(data_type: np.dtype) -> int: def import_onnx_model(model: onnx.ModelProto) -> Function: onnx.checker.check_model(model) model_byte_string = model.SerializeToString() - ie = Core() - ie_network = ie.read_network(bytes(model_byte_string), Blob(TensorDesc("U8", [], "C"))) + func = ie.read_model(bytes(model_byte_string), Tensor(np.array([], dtype=np.uint8))) - ng_function = ie_network.get_function() - return ng_function + return func From 5998235368aac3465b5582f4efbdb59c40fcda96 Mon Sep 17 00:00:00 2001 From: Bartek Szmelczynski Date: Tue, 26 Oct 2021 17:18:49 +0200 Subject: [PATCH 02/46] Output const node python tests (#52) * add python bindings tests for Output * add proper tests * add new line --- .../test_output_const_node.py | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py diff --git a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py new file mode 100644 index 00000000000000..522e516a7b0a8e --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py @@ -0,0 +1,84 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os + +from ..conftest import model_path +from openvino.impl import ConstOutput, Shape, PartialShape, Type + +from openvino import Core + +is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" +test_net_xml, test_net_bin = model_path(is_myriad) + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def test_const_output_type(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input(0) + assert isinstance(node, ConstOutput) + + +def test_const_output_docs(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input(0) + exptected_string = "openvino.impl.ConstOutput wraps ov::Output" + assert node.__doc__ == exptected_string + + +def test_const_output_get_index(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input("data") + assert node.get_index() == 0 + + +def test_const_output_get_element_type(device): 
+ ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input("data") + assert node.get_element_type() == Type.f32 + + +def test_const_output_get_shape(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input("data") + expected_shape = Shape([1, 3, 32, 32]) + assert str(node.get_shape()) == str(expected_shape) + + +def test_const_output_get_partial_shape(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + node = exec_net.input("data") + expected_partial_shape = PartialShape([1, 3, 32, 32]) + assert node.get_partial_shape() == expected_partial_shape + + +def test_const_output_get_target_inputs(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) + outputs = exec_net.outputs + for node in outputs: + assert isinstance(node.get_target_inputs(), set) + From 74ca0845fa3abd1c3934ef1a36004113c0f39f67 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Fri, 29 Oct 2021 18:49:32 +0300 Subject: [PATCH 03/46] rename ie_version to version --- .../python/src/pyopenvino/core/{ie_version.cpp => version.cpp} | 0 .../python/src/pyopenvino/core/{ie_version.hpp => version.hpp} | 0 runtime/bindings/python/src/pyopenvino/pyopenvino.cpp | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename runtime/bindings/python/src/pyopenvino/core/{ie_version.cpp => version.cpp} (100%) rename runtime/bindings/python/src/pyopenvino/core/{ie_version.hpp => version.hpp} (100%) diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp b/runtime/bindings/python/src/pyopenvino/core/version.cpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_version.cpp rename to runtime/bindings/python/src/pyopenvino/core/version.cpp diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.hpp b/runtime/bindings/python/src/pyopenvino/core/version.hpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_version.hpp rename to runtime/bindings/python/src/pyopenvino/core/version.hpp diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index 8384e753134ad9..42df5384ec5d45 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -31,8 +31,8 @@ #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" #include "pyopenvino/core/ie_preprocess_info.hpp" -#include "pyopenvino/core/ie_version.hpp" #include "pyopenvino/core/offline_transformations.hpp" +#include "pyopenvino/core/version.hpp" #include "pyopenvino/core/tensor.hpp" #include "pyopenvino/core/tensor_description.hpp" #include "pyopenvino/graph/dimension.hpp" From dec4cb081227b506febfa93bbf42c980647c2c5e Mon Sep 17 00:00:00 2001 From: Piotr Szmelczynski Date: Wed, 3 Nov 2021 09:44:14 +0100 Subject: [PATCH 04/46] Pszmel/bind infer request (#51) * remove set_batch, get_blob and set_blob * update InferRequest class * change InferenceEngine::InferRequest to ov::runtime::InferRequest * update set_callback body * update bindings to reflect ov::runtime::InferRequest * bind set_input_tensor and get_input_tensor * style fix * clen ie_infer_queue.cpp * Bind exec core ov (#50) * bind core, exec_net classes * 
rm unused function * add new line * rename ie_infer_request -> infer_request * update imports * update __init__.py * update ie_api.py * Replace old containers with the new one * create impl for create_infer_request * comment out infer_queue to avoid errors with old infer_request * update infer_request bind to reflect new infer_request api * comment out inpuit_info from ie_network to avoid errors with old containers * Register new containers and comment out InferQueue * update infer request tests * style fix * remove unused imports * remove unused imports and 2 methods * add tests to cover all new methods from infer_request * style fix * add test * remove registration of InferResults * update name of exception_ptr parameter * update the loops that iterate through inputs and outputs * clean setCustomCallbacks * style fix * add Tensor import * style fix * update infer and normalize_inputs * style fix * rename startTime and endTime * Create test for mixed keys as infer arguments * update infer function * update return type of infer Co-authored-by: Bartek Szmelczynski --- .../bindings/python/src/openvino/__init__.py | 10 +- .../bindings/python/src/openvino/ie_api.py | 18 +- .../python/src/pyopenvino/core/common.hpp | 1 + .../python/src/pyopenvino/core/containers.cpp | 38 +-- .../python/src/pyopenvino/core/containers.hpp | 26 +- .../pyopenvino/core/executable_network.cpp | 6 +- .../src/pyopenvino/core/ie_infer_queue.cpp | 209 +++++++------- .../src/pyopenvino/core/ie_infer_request.cpp | 210 -------------- .../src/pyopenvino/core/ie_infer_request.hpp | 43 --- .../python/src/pyopenvino/core/ie_network.cpp | 9 - .../src/pyopenvino/core/infer_request.cpp | 266 ++++++++++++++++++ .../src/pyopenvino/core/infer_request.hpp | 47 ++++ .../python/src/pyopenvino/core/version.cpp | 4 +- .../python/src/pyopenvino/pyopenvino.cpp | 12 +- .../test_infer_request.py | 184 ++++++------ 15 files changed, 552 insertions(+), 531 deletions(-) delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/core/infer_request.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/core/infer_request.hpp diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index 9ece8649f652e2..5634960d064d8c 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -15,8 +15,7 @@ from openvino.ie_api import BlobWrapper from openvino.ie_api import infer -from openvino.ie_api import async_infer -from openvino.ie_api import get_result +from openvino.ie_api import start_async from openvino.ie_api import blob_from_file from openvino.ie_api import tensor_from_file @@ -37,7 +36,7 @@ from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import get_version from openvino.pyopenvino import StatusCode -from openvino.pyopenvino import InferQueue +#from openvino.pyopenvino import InferQueue from openvino.pyopenvino import InferRequest # TODO: move to ie_api? 
from openvino.pyopenvino import Blob from openvino.pyopenvino import PreProcessInfo @@ -82,7 +81,6 @@ ExecutableNetwork.infer = infer # Patching InferRequest InferRequest.infer = infer -InferRequest.async_infer = async_infer -InferRequest.get_result = get_result +InferRequest.start_async = start_async # Patching InferQueue -InferQueue.async_infer = async_infer +#InferQueue.async_infer = async_infer diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index 29588032458062..7a208a0fc926ff 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np +import copy from openvino.pyopenvino import TBlobFloat32 from openvino.pyopenvino import TBlobFloat64 @@ -36,22 +37,17 @@ def normalize_inputs(py_dict: dict) -> dict: """Normalize a dictionary of inputs to contiguous numpy arrays.""" - return {k: (np.ascontiguousarray(v) if isinstance(v, np.ndarray) else v) + return {k: (Tensor(v) if isinstance(v, np.ndarray) else v) for k, v in py_dict.items()} # flake8: noqa: D102 -def infer(request: InferRequest, inputs: dict = None) -> dict: - results = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {})) - return {name: (blob.buffer.copy()) for name, blob in results.items()} +def infer(request: InferRequest, inputs: dict = None) -> np.ndarray: + res = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {})) + return np.asarray([copy.deepcopy(tensor.data) for tensor in res]) # flake8: noqa: D102 -def get_result(request: InferRequest, name: str) -> np.ndarray: - return request.get_blob(name).buffer.copy() - -# flake8: noqa: D102 -def async_infer(request: InferRequest, inputs: dict = None, userdata=None) -> None: # type: ignore - request._async_infer(inputs=normalize_inputs(inputs if inputs is not None else {}), - userdata=userdata) +def start_async(request: InferRequest, inputs: dict = None) -> None: # type: ignore + request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {})) # flake8: noqa: C901 # Dispatch Blob types on Python side. 
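
The ie_api.py changes above settle the new Python calling convention: inputs are passed as a dict keyed either by tensor name or by input index, numpy arrays are wrapped into Tensor by normalize_inputs, infer() now returns copies of the output tensors' data, and start_async() replaces async_infer(). A minimal usage sketch under those assumptions follows; the IR paths and the "data"/"fc_out" tensor names are placeholders borrowed from the tests in this series, not part of the change itself:

    import numpy as np
    from openvino import Core

    core = Core()
    # Placeholder IR paths; any model with one input "data" and one output "fc_out" fits.
    func = core.read_model(model="test_model.xml", weights="test_model.bin")
    exec_net = core.compile_model(func, "CPU")
    request = exec_net.create_infer_request()

    img = np.zeros((1, 3, 32, 32), dtype=np.float32)

    # Synchronous path: the ndarray is wrapped into a Tensor by normalize_inputs,
    # and the patched infer() returns copies of the output tensors' data.
    results = request.infer({"data": img})

    # Asynchronous path: keys may also be input indices (but not mixed with
    # names in one call; that raises TypeError, as the tests below check).
    request.start_async({0: img})
    request.wait()
    out = request.get_tensor("fc_out").data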
diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp index 314a8290244581..f9ca68fdabc982 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -53,4 +53,5 @@ namespace Common void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual); + }; // namespace Common diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.cpp b/runtime/bindings/python/src/pyopenvino/core/containers.cpp index 096b6074325815..e91765e8f73aec 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -8,44 +8,18 @@ #include #include -PYBIND11_MAKE_OPAQUE(Containers::PyInputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyConstInputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyOutputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyResults); +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); namespace py = pybind11; namespace Containers { -void regclass_PyInputsDataMap(py::module m) { - auto py_inputs_data_map = py::bind_map(m, "PyInputsDataMap"); - - py_inputs_data_map.def("keys", [](PyInputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); -} - -void regclass_PyConstInputsDataMap(py::module m) { - auto py_const_inputs_data_map = py::bind_map(m, "PyConstInputsDataMap"); - - py_const_inputs_data_map.def("keys", [](PyConstInputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); +void regclass_TensorIndexMap(py::module m) { + auto tensor_index_map = py::bind_map(m, "TensorIndexMap"); } -void regclass_PyOutputsDataMap(py::module m) { - auto py_outputs_data_map = py::bind_map(m, "PyOutputsDataMap"); - - py_outputs_data_map.def("keys", [](PyOutputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); -} - -void regclass_PyResults(py::module m) { - auto py_results = py::bind_map(m, "PyResults"); - - py_results.def("keys", [](PyResults& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); +void regclass_TensorNameMap(py::module m) { + auto tensor_name_map = py::bind_map(m, "TensorNameMap"); } } // namespace Containers diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp index 511d9053ea50fa..30a8fac6440403 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -5,27 +5,17 @@ #pragma once #include +#include #include -#include -#include "ie_data.h" -#include "ie_blob.h" +#include namespace py = pybind11; namespace Containers { - using PyInputsDataMap = std::map>; + using TensorIndexMap = std::map; + using TensorNameMap = std::map; + using InferResults = std::vector; - using PyConstInputsDataMap = - std::map>; - - using PyOutputsDataMap = - std::map>; - - using PyResults = - std::map>; - - void regclass_PyInputsDataMap(py::module m); - void regclass_PyConstInputsDataMap(py::module m); - void regclass_PyOutputsDataMap(py::module m); - void regclass_PyResults(py::module m); -} \ No newline at end of file + void regclass_TensorIndexMap(py::module m); + void regclass_TensorNameMap(py::module m); +} diff --git 
a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp index 82817b68484f74..a5b04aab2c18cb 100644 --- a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp @@ -7,8 +7,8 @@ #include "common.hpp" #include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" #include "pyopenvino/core/ie_input_info.hpp" +#include "pyopenvino/core/infer_request.hpp" namespace py = pybind11; @@ -17,7 +17,9 @@ void regclass_ExecutableNetwork(py::module m) { m, "ExecutableNetwork"); - cls.def("create_infer_request", &ov::runtime::ExecutableNetwork::create_infer_request); + cls.def("create_infer_request", [](ov::runtime::ExecutableNetwork& self) { + return InferRequestWrapper(self.create_infer_request(), self.inputs(), self.outputs()); + }); // cls.def("infer_new_request", [](ov::runtime::ExecutableNetwork& self, const py::dict& inputs) { // TODO: implment after https://github.com/openvinotoolkit/openvino/pull/7962 diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp index d54e7cce69c9ff..e80cd33105f01b 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp @@ -18,7 +18,7 @@ #include #include "pyopenvino/core/common.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" +#include "pyopenvino/core/infer_request.hpp" #define INVALID_ID -1 @@ -59,16 +59,9 @@ class InferQueue { size_t request_id = _idle_handles.front(); - InferenceEngine::StatusCode status = - _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - - if (status == InferenceEngine::StatusCode::RESULT_NOT_READY) { - status = _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - } - py::dict request_info = py::dict(); request_info["id"] = request_id; - request_info["status"] = status; + // request_info["status"] = true; // TODO return request_info; } @@ -87,7 +80,7 @@ class InferQueue { return idle_request_id; } - std::vector waitAll() { + std::vector waitAll() { // Wait for all requests to return with callback thus updating // _idle_handles so it matches the size of requests py::gil_scoped_release release; @@ -96,10 +89,10 @@ class InferQueue { return _idle_handles.size() == _requests.size(); }); - std::vector statuses; + std::vector statuses; for (size_t handle = 0; handle < _requests.size(); handle++) { - statuses.push_back(_requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + statuses.push_back(_requests[handle]._request.wait_for(std::chrono::milliseconds(0))); } return statuses; @@ -107,8 +100,8 @@ class InferQueue { void setDefaultCallbacks() { for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.SetCompletionCallback([this, handle /* ... */]() { - _requests[handle]._endTime = Time::now(); + _requests[handle]._request.set_callback([this, handle /* ... 
*/](std::exception_ptr exception_ptr) { + _requests[handle]._end_time = Time::now(); // Add idle handle to queue _idle_handles.push(handle); // Notify locks in getIdleRequestId() or waitAll() functions @@ -119,16 +112,18 @@ class InferQueue { void setCustomCallbacks(py::function f_callback) { for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.SetCompletionCallback([this, f_callback, handle /* ... */]() { - _requests[handle]._endTime = Time::now(); - InferenceEngine::StatusCode statusCode = - _requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { - statusCode = InferenceEngine::StatusCode::OK; + _requests[handle]._request.set_callback([this, f_callback, handle](std::exception_ptr exception_ptr) { + _requests[handle]._end_time = Time::now(); + try { + if (exception_ptr) { + std::rethrow_exception(exception_ptr); + } + } catch (const std::exception& e) { + IE_THROW() << "Caught exception: " << e.what(); } // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; - f_callback(_requests[handle], statusCode, _user_ids[handle]); + f_callback(_requests[handle], _user_ids[handle]); // Add idle handle to queue _idle_handles.push(handle); // Notify locks in getIdleRequestId() or waitAll() functions @@ -145,89 +140,89 @@ class InferQueue { std::condition_variable _cv; }; -void regclass_InferQueue(py::module m) { - py::class_> cls(m, "InferQueue"); - - cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { - if (jobs == 0) { - const InferenceEngine::ExecutableNetwork& _net = net; - jobs = (size_t)Common::get_optimal_number_of_requests(_net); - } - - std::vector requests; - std::queue idle_handles; - std::vector user_ids(jobs); - - for (size_t handle = 0; handle < jobs; handle++) { - auto request = InferRequestWrapper(net.CreateInferRequest()); - // Get Inputs and Outputs info from executable network - request._inputsInfo = net.GetInputsInfo(); - request._outputsInfo = net.GetOutputsInfo(); - - requests.push_back(request); - idle_handles.push(handle); - } - - return new InferQueue(requests, idle_handles, user_ids); - }), - py::arg("network"), - py::arg("jobs") = 0); - - cls.def( - "_async_infer", - [](InferQueue& self, const py::dict inputs, py::object userdata) { - // getIdleRequestId function has an intention to block InferQueue - // until there is at least one idle (free to use) InferRequest - auto handle = self.getIdleRequestId(); - // Set new inputs label/id from user - self._user_ids[handle] = userdata; - // Update inputs of picked InferRequest - if (!inputs.empty()) { - Common::set_request_blobs(self._requests[handle]._request, inputs); - } - // Now GIL can be released - we are NOT working with Python objects in this block - { - py::gil_scoped_release release; - self._requests[handle]._startTime = Time::now(); - // Start InferRequest in asynchronus mode - self._requests[handle]._request.StartAsync(); - } - }, - py::arg("inputs"), - py::arg("userdata")); - - cls.def("is_ready", [](InferQueue& self) { - return self._is_ready(); - }); - - cls.def("wait_all", [](InferQueue& self) { - return self.waitAll(); - }); - - cls.def("get_idle_request_info", [](InferQueue& self) { - return self._getIdleRequestInfo(); - }); - - cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { - self.setCustomCallbacks(f_callback); - }); - - cls.def("__len__", [](InferQueue& self) { - return self._requests.size(); - }); - - 
cls.def( - "__iter__", - [](InferQueue& self) { - return py::make_iterator(self._requests.begin(), self._requests.end()); - }, - py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ - - cls.def("__getitem__", [](InferQueue& self, size_t i) { - return self._requests[i]; - }); - - cls.def_property_readonly("userdata", [](InferQueue& self) { - return self._user_ids; - }); -} +// void regclass_InferQueue(py::module m) { +// py::class_> cls(m, "InferQueue"); + +// cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { +// if (jobs == 0) { +// const InferenceEngine::ExecutableNetwork& _net = net; +// jobs = (size_t)Common::get_optimal_number_of_requests(_net); +// } + +// std::vector requests; +// std::queue idle_handles; +// std::vector user_ids(jobs); + +// for (size_t handle = 0; handle < jobs; handle++) { +// auto request = InferRequestWrapper(net.CreateInferRequest()); +// // Get Inputs and Outputs info from executable network +// request._inputsInfo = net.GetInputsInfo(); +// request._outputsInfo = net.GetOutputsInfo(); + +// requests.push_back(request); +// idle_handles.push(handle); +// } + +// return new InferQueue(requests, idle_handles, user_ids); +// }), +// py::arg("network"), +// py::arg("jobs") = 0); + +// cls.def( +// "_async_infer", +// [](InferQueue& self, const py::dict inputs, py::object userdata) { +// // getIdleRequestId function has an intention to block InferQueue +// // until there is at least one idle (free to use) InferRequest +// auto handle = self.getIdleRequestId(); +// // Set new inputs label/id from user +// self._user_ids[handle] = userdata; +// // Update inputs of picked InferRequest +// if (!inputs.empty()) { +// Common::set_request_blobs(self._requests[handle]._request, inputs); +// } +// // Now GIL can be released - we are NOT working with Python objects in this block +// { +// py::gil_scoped_release release; +// self._requests[handle]._start_time = Time::now(); +// // Start InferRequest in asynchronus mode +// self._requests[handle]._request.start_async(); +// } +// }, +// py::arg("inputs"), +// py::arg("userdata")); + +// cls.def("is_ready", [](InferQueue& self) { +// return self._is_ready(); +// }); + +// cls.def("wait_all", [](InferQueue& self) { +// return self.waitAll(); +// }); + +// cls.def("get_idle_request_info", [](InferQueue& self) { +// return self._getIdleRequestInfo(); +// }); + +// cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { +// self.setCustomCallbacks(f_callback); +// }); + +// cls.def("__len__", [](InferQueue& self) { +// return self._requests.size(); +// }); + +// cls.def( +// "__iter__", +// [](InferQueue& self) { +// return py::make_iterator(self._requests.begin(), self._requests.end()); +// }, +// py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ + +// cls.def("__getitem__", [](InferQueue& self, size_t i) { +// return self._requests[i]; +// }); + +// cls.def_property_readonly("userdata", [](InferQueue& self) { +// return self._user_ids; +// }); +// } diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp deleted file mode 100644 index 52d250f92ee7b7..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "pyopenvino/core/ie_infer_request.hpp" - -#include -#include - -#include - -#include 
"pyopenvino/core/common.hpp" -#include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/executable_network.hpp" -#include "pyopenvino/core/ie_preprocess_info.hpp" - -namespace py = pybind11; - -void regclass_InferRequest(py::module m) { - py::class_> cls(m, "InferRequest"); - - cls.def( - "set_batch", - [](InferRequestWrapper& self, const int size) { - self._request.SetBatch(size); - }, - py::arg("size")); - - cls.def( - "get_blob", - [](InferRequestWrapper& self, const std::string& name) { - return self._request.GetBlob(name); - }, - py::arg("name")); - - cls.def( - "set_blob", - [](InferRequestWrapper& self, const std::string& name, py::handle& blob) { - self._request.SetBlob(name, Common::cast_to_blob(blob)); - }, - py::arg("name"), - py::arg("blob")); - - cls.def( - "set_blob", - [](InferRequestWrapper& self, - const std::string& name, - py::handle& blob, - const InferenceEngine::PreProcessInfo& info) { - self._request.SetBlob(name, Common::cast_to_blob(blob)); - }, - py::arg("name"), - py::arg("blob"), - py::arg("info")); - - cls.def( - "set_input", - [](InferRequestWrapper& self, const py::dict& inputs) { - Common::set_request_blobs(self._request, inputs); - }, - py::arg("inputs")); - - cls.def( - "set_output", - [](InferRequestWrapper& self, const py::dict& results) { - Common::set_request_blobs(self._request, results); - }, - py::arg("results")); - - cls.def( - "_infer", - [](InferRequestWrapper& self, const py::dict& inputs) { - // Update inputs if there are any - if (!inputs.empty()) { - Common::set_request_blobs(self._request, inputs); - } - // Call Infer function - self._startTime = Time::now(); - self._request.Infer(); - self._endTime = Time::now(); - // Get output Blobs and return - Containers::PyResults results; - for (auto& out : self._outputsInfo) { - results[out.first] = self._request.GetBlob(out.first); - } - return results; - }, - py::arg("inputs")); - - cls.def( - "_async_infer", - [](InferRequestWrapper& self, const py::dict inputs, py::object userdata) { - py::gil_scoped_release release; - if (!inputs.empty()) { - Common::set_request_blobs(self._request, inputs); - } - // TODO: check for None so next async infer userdata can be updated - // if (!userdata.empty()) - // { - // if (user_callback_defined) - // { - // self._request.SetCompletionCallback([self, userdata]() { - // // py::gil_scoped_acquire acquire; - // auto statusCode = const_cast(self).Wait( - // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - // self._request.user_callback(self, statusCode, userdata); - // // py::gil_scoped_release release; - // }); - // } - // else - // { - // py::print("There is no callback function!"); - // } - // } - self._startTime = Time::now(); - self._request.StartAsync(); - }, - py::arg("inputs"), - py::arg("userdata")); - - cls.def("cancel", [](InferRequestWrapper& self) { - self._request.Cancel(); - }); - - cls.def( - "wait", - [](InferRequestWrapper& self, int64_t millis_timeout) { - py::gil_scoped_release release; - return self._request.Wait(millis_timeout); - }, - py::arg("millis_timeout") = InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - - cls.def( - "set_completion_callback", - [](InferRequestWrapper& self, py::function f_callback, py::object userdata) { - self._request.SetCompletionCallback([&self, f_callback, userdata]() { - self._endTime = Time::now(); - InferenceEngine::StatusCode statusCode = - self._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { 
- statusCode = InferenceEngine::StatusCode::OK; - } - // Acquire GIL, execute Python function - py::gil_scoped_acquire acquire; - f_callback(self, statusCode, userdata); - }); - }, - py::arg("f_callback"), - py::arg("userdata")); - - cls.def("get_perf_counts", [](InferRequestWrapper& self) { - std::map perfMap; - perfMap = self._request.GetPerformanceCounts(); - py::dict perf_map; - - for (auto it : perfMap) { - py::dict profile_info; - switch (it.second.status) { - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - profile_info["status"] = "EXECUTED"; - break; - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - profile_info["status"] = "NOT_RUN"; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - profile_info["status"] = "OPTIMIZED_OUT"; - break; - default: - profile_info["status"] = "UNKNOWN"; - } - profile_info["exec_type"] = it.second.exec_type; - profile_info["layer_type"] = it.second.layer_type; - profile_info["cpu_time"] = it.second.cpu_uSec; - profile_info["real_time"] = it.second.realTime_uSec; - profile_info["execution_index"] = it.second.execution_index; - perf_map[it.first.c_str()] = profile_info; - } - return perf_map; - }); - - cls.def( - "preprocess_info", - [](InferRequestWrapper& self, const std::string& name) { - return self._request.GetPreProcess(name); - }, - py::arg("name")); - - // cls.def_property_readonly("preprocess_info", [](InferRequestWrapper& self) { - // - // }); - - cls.def_property_readonly("input_blobs", [](InferRequestWrapper& self) { - Containers::PyResults input_blobs; - for (auto& in : self._inputsInfo) { - input_blobs[in.first] = self._request.GetBlob(in.first); - } - return input_blobs; - }); - - cls.def_property_readonly("output_blobs", [](InferRequestWrapper& self) { - Containers::PyResults output_blobs; - for (auto& out : self._outputsInfo) { - output_blobs[out.first] = self._request.GetBlob(out.first); - } - return output_blobs; - }); - - cls.def_property_readonly("latency", [](InferRequestWrapper& self) { - return self.getLatency(); - }); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp deleted file mode 100644 index 13afbac440360d..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include - -#include -#include -#include - -namespace py = pybind11; - -typedef std::chrono::high_resolution_clock Time; -typedef std::chrono::nanoseconds ns; - -class InferRequestWrapper { -public: - InferRequestWrapper(InferenceEngine::InferRequest request) - : _request(request) - { - } - // ~InferRequestWrapper() = default; - - // bool user_callback_defined; - // py::function user_callback; - - double getLatency() { - auto execTime = std::chrono::duration_cast(_endTime - _startTime); - return static_cast(execTime.count()) * 0.000001; - } - - InferenceEngine::InferRequest _request; - InferenceEngine::ConstInputsDataMap _inputsInfo; - InferenceEngine::ConstOutputsDataMap _outputsInfo; - Time::time_point _startTime; - Time::time_point _endTime; -}; - -void regclass_InferRequest(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp index b57765f19cf55c..e06f9bf79bb4c3 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp +++ 
b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp @@ -79,15 +79,6 @@ void regclass_IENetwork(py::module m) { &InferenceEngine::CNNNetwork::getBatchSize, &InferenceEngine::CNNNetwork::setBatchSize); - cls.def_property_readonly("input_info", [](InferenceEngine::CNNNetwork& self) { - Containers::PyInputsDataMap inputs; - const InferenceEngine::InputsDataMap& inputsInfo = self.getInputsInfo(); - for (auto& in : inputsInfo) { - inputs[in.first] = in.second; - } - return inputs; - }); - cls.def_property_readonly("outputs", [](InferenceEngine::CNNNetwork& self) { return self.getOutputsInfo(); }); diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp new file mode 100644 index 00000000000000..190fbe0d07b053 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -0,0 +1,266 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "pyopenvino/core/infer_request.hpp" + +#include +#include +#include +#include + +#include + +#include "pyopenvino/core/containers.hpp" + +namespace py = pybind11; + +void regclass_InferRequest(py::module m) { + py::class_> cls(m, "InferRequest"); + cls.def( + "set_tensors", + [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { + for (auto&& input : inputs) { + self._request.set_tensor(input.first, input.second); + } + }, + py::arg("inputs")); + + cls.def( + "set_output_tensors", + [](InferRequestWrapper& self, const Containers::TensorIndexMap& outputs) { + for (auto&& output : outputs) { + self._request.set_output_tensor(output.first, output.second); + } + }, + py::arg("outputs")); + + cls.def( + "set_input_tensors", + [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { + for (auto&& input : inputs) { + self._request.set_input_tensor(input.first, input.second); + } + }, + py::arg("inputs")); + + cls.def( + "_infer", + [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { + // Update inputs if there are any + for (auto&& input : inputs) { + self._request.set_input_tensor(input.first, input.second); + } + // Call Infer function + self._start_time = Time::now(); + self._request.infer(); + self._end_time = Time::now(); + Containers::InferResults results; + for (auto& out : self._outputs) { + results.push_back(self._request.get_tensor(out)); + } + return results; + }, + py::arg("inputs")); + + cls.def( + "_infer", + [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { + // Update inputs if there are any + for (auto&& input : inputs) { + self._request.set_tensor(input.first, input.second); + } + // Call Infer function + self._start_time = Time::now(); + self._request.infer(); + self._end_time = Time::now(); + Containers::InferResults results; + for (auto& out : self._outputs) { + results.push_back(self._request.get_tensor(out)); + } + return results; + }, + py::arg("inputs")); + + cls.def( + "_start_async", + [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { + py::gil_scoped_release release; + for (auto&& input : inputs) { + self._request.set_input_tensor(input.first, input.second); + } + // TODO: check for None so next async infer userdata can be updated + // if (!userdata.empty()) + // { + // if (user_callback_defined) + // { + // self._request.SetCompletionCallback([self, userdata]() { + // // py::gil_scoped_acquire acquire; + // auto statusCode = const_cast(self).Wait( + // 
InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + // self._request.user_callback(self, statusCode, userdata); + // // py::gil_scoped_release release; + // }); + // } + // else + // { + // py::print("There is no callback function!"); + // } + // } + self._start_time = Time::now(); + self._request.start_async(); + }, + py::arg("inputs")); + + cls.def( + "_start_async", + [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { + py::gil_scoped_release release; + for (auto&& input : inputs) { + self._request.set_tensor(input.first, input.second); + } + // TODO: check for None so next async infer userdata can be updated + // if (!userdata.empty()) + // { + // if (user_callback_defined) + // { + // self._request.SetCompletionCallback([self, userdata]() { + // // py::gil_scoped_acquire acquire; + // auto statusCode = const_cast(self).Wait( + // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + // self._request.user_callback(self, statusCode, userdata); + // // py::gil_scoped_release release; + // }); + // } + // else + // { + // py::print("There is no callback function!"); + // } + // } + self._start_time = Time::now(); + self._request.start_async(); + }, + py::arg("inputs")); + + cls.def("cancel", [](InferRequestWrapper& self) { + self._request.cancel(); + }); + + cls.def("wait", [](InferRequestWrapper& self) { + py::gil_scoped_release release; + self._request.wait(); + }); + + cls.def( + "wait_for", + [](InferRequestWrapper& self, const int timeout) { + py::gil_scoped_release release; + return self._request.wait_for(std::chrono::milliseconds(timeout)); + }, + py::arg("timeout")); + + cls.def( + "set_callback", + [](InferRequestWrapper& self, py::function f_callback) { + self._request.set_callback([&self, f_callback](std::exception_ptr exception_ptr) { + self._end_time = Time::now(); + try { + if (exception_ptr) { + std::rethrow_exception(exception_ptr); + } + } catch (const std::exception& e) { + IE_THROW() << "Caught exception: " << e.what(); + } + // Acquire GIL, execute Python function + py::gil_scoped_acquire acquire; + f_callback(exception_ptr); + }); + }, + py::arg("f_callback")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const std::string& name) { + return self._request.get_tensor(name); + }, + py::arg("name")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const ov::Output& port) { + return self._request.get_tensor(port); + }, + py::arg("port")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const ov::Output& port) { + return self._request.get_tensor(port); + }, + py::arg("port")); + + cls.def( + "set_tensor", + [](InferRequestWrapper& self, const std::string& name, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(name, tensor); + }, + py::arg("name"), + py::arg("tensor")); + + cls.def( + "set_tensor", + [](InferRequestWrapper& self, const ov::Output& port, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(port, tensor); + }, + py::arg("port"), + py::arg("tensor")); + + cls.def( + "set_tensor", + [](InferRequestWrapper& self, const ov::Output& port, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(port, tensor); + }, + py::arg("port"), + py::arg("tensor")); + + cls.def( + "set_input_tensor", + [](InferRequestWrapper& self, size_t idx, const ov::runtime::Tensor& tensor) { + self._request.set_input_tensor(idx, tensor); + }, + py::arg("idx"), + py::arg("tensor")); + + cls.def( + "set_input_tensor", + [](InferRequestWrapper& self, const ov::runtime::Tensor& tensor) { 
+ self._request.set_input_tensor(tensor); + }, + py::arg("tensor")); + + cls.def( + "set_output_tensor", + [](InferRequestWrapper& self, size_t idx, const ov::runtime::Tensor& tensor) { + self._request.set_output_tensor(idx, tensor); + }, + py::arg("idx"), + py::arg("tensor")); + + cls.def( + "set_output_tensor", + [](InferRequestWrapper& self, const ov::runtime::Tensor& tensor) { + self._request.set_output_tensor(tensor); + }, + py::arg("tensor")); + + cls.def_property_readonly("input_tensors", [](InferRequestWrapper& self) { + return self._inputs; + }); + + cls.def_property_readonly("output_tensors", [](InferRequestWrapper& self) { + return self._outputs; + }); + + cls.def_property_readonly("latency", [](InferRequestWrapper& self) { + return self.get_latency(); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp new file mode 100644 index 00000000000000..e81261a54a608c --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp @@ -0,0 +1,47 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include + +#include + +namespace py = pybind11; + +typedef std::chrono::high_resolution_clock Time; +typedef std::chrono::nanoseconds ns; + +class InferRequestWrapper { +public: + InferRequestWrapper(ov::runtime::InferRequest request) + : _request(request) + { + } + + InferRequestWrapper(ov::runtime::InferRequest request, const std::vector>& inputs, const std::vector>& outputs) + : _request(request), _inputs(inputs), _outputs(outputs) + { + } + // ~InferRequestWrapper() = default; + + // bool user_callback_defined; + // py::function user_callback; + + double get_latency() { + auto execTime = std::chrono::duration_cast(_end_time - _start_time); + return static_cast(execTime.count()) * 0.000001; + } + + ov::runtime::InferRequest _request; + std::vector> _inputs; + std::vector> _outputs; + + Time::time_point _start_time; + Time::time_point _end_time; +}; + +void regclass_InferRequest(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/version.cpp b/runtime/bindings/python/src/pyopenvino/core/version.cpp index 45b2b0ed6b30c9..bed253697fda06 100644 --- a/runtime/bindings/python/src/pyopenvino/core/version.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/version.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "openvino/core/version.hpp" +#include + namespace py = pybind11; void regclass_Version(py::module m) { diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index 42df5384ec5d45..e716063d4ee4ca 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include "pyopenvino/graph/axis_set.hpp" @@ -26,15 +27,16 @@ #include "pyopenvino/core/ie_blob.hpp" #include "pyopenvino/core/ie_data.hpp" #include "pyopenvino/core/ie_infer_queue.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" #include "pyopenvino/core/ie_input_info.hpp" #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" #include "pyopenvino/core/ie_preprocess_info.hpp" #include "pyopenvino/core/offline_transformations.hpp" #include "pyopenvino/core/version.hpp" +#include "pyopenvino/core/infer_request.hpp" #include "pyopenvino/core/tensor.hpp" #include 
"pyopenvino/core/tensor_description.hpp" +#include "pyopenvino/core/version.hpp" #include "pyopenvino/graph/dimension.hpp" #include "pyopenvino/graph/layout.hpp" #include "pyopenvino/graph/ops/constant.hpp" @@ -139,17 +141,15 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_Tensor(m); // Registering specific types of containers - Containers::regclass_PyInputsDataMap(m); - Containers::regclass_PyConstInputsDataMap(m); - Containers::regclass_PyOutputsDataMap(m); - Containers::regclass_PyResults(m); + Containers::regclass_TensorIndexMap(m); + Containers::regclass_TensorNameMap(m); regclass_ExecutableNetwork(m); regclass_InferRequest(m); regclass_Version(m); regclass_Parameter(m); regclass_InputInfo(m); - regclass_InferQueue(m); + // regclass_InferQueue(m); regclass_PreProcessInfo(m); regmodule_offline_transformations(m); diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 50fbf1db853920..1b2cac4a627881 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -6,110 +6,111 @@ import pytest from tests.test_inference_engine.helpers import model_path, read_image -from openvino import Core, Blob, TensorDesc, StatusCode +from openvino import Core, Tensor is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) -def test_get_perf_counts(device): +@pytest.mark.skip(reason="ProfilingInfo has to be bound") +def test_get_profiling_info(device): ie_core = Core() func = ie_core.read_model(test_net_xml, test_net_bin) ie_core.set_config({"PERF_COUNT": "YES"}, device) exec_net = ie_core.compile_model(func, device) img = read_image() request = exec_net.create_infer_request() - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, img) - request.set_input({"data": input_blob}) - request.infer() - pc = request.get_perf_counts() + request.infer({0: img}) + pc = request.get_profiling_info() + assert pc["29"]["status"] == "EXECUTED" assert pc["29"]["layer_type"] == "FullyConnected" del exec_net del ie_core - del net -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, " - "Dynamic batch fully supported only on CPU") -@pytest.mark.skip(reason="Fix") -def test_set_batch_size(device): +def test_tensor_setter(device): ie_core = Core() - ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device) func = ie_core.read_model(test_net_xml, test_net_bin) - func.batch_size = 10 - data = np.ones(shape=net.input_info["data"].input_data.shape) - exec_net = ie_core.compile_model(net, device) - data[0] = read_image()[0] - request = exec_net.create_infer_request() - request.set_batch(1) - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, data) - request.set_input({"data": input_blob}) - request.infer() - assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \ - "Incorrect data for 1st batch" - del exec_net - del ie_core - del net + exec_net_1 = ie_core.compile_model(network=func, device_name=device) + exec_net_2 = ie_core.compile_model(network=func, device_name=device) + img = read_image() + tensor = Tensor(img) -@pytest.mark.skip(reason="Fix") -def test_set_zero_batch_size(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - exec_net = ie_core.compile_model(func, device) - 
request = exec_net.create_infer_request() - with pytest.raises(ValueError) as e: - request.set_batch(0) - assert "Batch size should be positive integer number but 0 specified" in str(e.value) - del exec_net - del ie_core - del func + request1 = exec_net_1.create_infer_request() + request1.set_tensor("data", tensor) + t1 = request1.get_tensor("data") + + assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2) + + res = request1.infer({0: tensor}) + res_1 = np.sort(res[0]) + t2 = request1.get_tensor("fc_out") + assert np.allclose(t2.data, res[0].data, atol=1e-2, rtol=1e-2) + + request = exec_net_2.create_infer_request() + res = request.infer({"data": tensor}) + res_2 = np.sort(request.get_tensor("fc_out").data) + assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2) + + request.set_tensor("data", tensor) + t3 = request.get_tensor("data") + assert np.allclose(t3.data, t1.data, atol=1e-2, rtol=1e-2) -@pytest.mark.skip(reason="Fix") -def test_set_negative_batch_size(device): +def test_set_tensors(device): ie_core = Core() func = ie_core.read_model(test_net_xml, test_net_bin) exec_net = ie_core.compile_model(func, device) + + data1 = read_image() + tensor1 = Tensor(data1) + data2 = np.ones(shape=(1, 10), dtype=np.float32) + tensor2 = Tensor(data2) + data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32) + tensor3 = Tensor(data3) + data4 = np.zeros(shape=(1, 10), dtype=np.float32) + tensor4 = Tensor(data4) + request = exec_net.create_infer_request() - with pytest.raises(ValueError) as e: - request.set_batch(-1) - assert "Batch size should be positive integer number but -1 specified" in str(e.value) - del exec_net - del ie_core - del func + request.set_tensors({"data": tensor1, "fc_out": tensor2}) + t1 = request.get_tensor("data") + t2 = request.get_tensor("fc_out") + assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2) + assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2) + request.set_output_tensors({0: tensor2}) + output_node = exec_net.outputs[0] + t3 = request.get_tensor(output_node) + assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2) -def test_blob_setter(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - exec_net_1 = ie_core.compile_model(network=func, device_name=device) + request.set_input_tensors({0: tensor1}) + output_node = exec_net.inputs[0] + t4 = request.get_tensor(output_node) + assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2) - func.input_info["data"].layout = "NHWC" - exec_net_2 = ie_core.compile_model(network=func, device_name=device) + output_node = exec_net.inputs[0] + request.set_tensor(output_node, tensor3) + t5 = request.get_tensor(output_node) + assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2) - img = read_image() + request.set_input_tensor(tensor3) + t6 = request.get_tensor(request.input_tensors[0]) + assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2) - request1 = exec_net_1.create_infer_request() - tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW") - img_blob1 = Blob(tensor_desc, img) - request1.set_input({"data": img_blob1}) - request1.infer() - res_1 = np.sort(request1.get_blob("fc_out").buffer) - - img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32) - tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC") - img_blob = Blob(tensor_desc, img) - request = exec_net_2.create_infer_request() - request.set_blob("data", img_blob) - request.infer() - res_2 = np.sort(request.get_blob("fc_out").buffer) - assert 
np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2) + request.set_input_tensor(0, tensor1) + t7 = request.get_tensor(request.input_tensors[0]) + assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2) + + request.set_output_tensor(tensor2) + t8 = request.get_tensor(request.output_tensors[0]) + assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2) + + request.set_output_tensor(0, tensor4) + t9 = request.get_tensor(request.output_tensors[0]) + assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2) def test_cancel(device): @@ -117,25 +118,38 @@ def test_cancel(device): func = ie_core.read_model(test_net_xml, test_net_bin) exec_net = ie_core.compile_model(func, device) img = read_image() - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, img) request = exec_net.create_infer_request() - def callback(req, code, array): - array.append(42) + def callback(e): + raise Exception(e) - data = [] - request.set_completion_callback(callback, data) - request.set_input({"data": input_blob}) - request.async_infer() + request.set_callback(callback) + request.start_async({0: img}) request.cancel() with pytest.raises(RuntimeError) as e: request.wait() assert "[ INFER_CANCELLED ]" in str(e.value) - # check if callback has executed - assert data == [42] - request.async_infer() - status = request.wait() - assert status == StatusCode.OK - assert data == [42, 42] + request.start_async({"data": img}) + request.cancel() + with pytest.raises(RuntimeError) as e: + request.wait_for(1) + assert "[ INFER_CANCELLED ]" in str(e.value) + + +def test_infer_mixed_keys(device): + ie_core = Core() + func = ie_core.read_model(test_net_xml, test_net_bin) + ie_core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = ie_core.compile_model(func, device) + + img = read_image() + tensor = Tensor(img) + + data2 = np.ones(shape=(1, 10), dtype=np.float32) + tensor2 = Tensor(data2) + + request = exec_net.create_infer_request() + with pytest.raises(TypeError) as e: + request.infer({0: tensor, "fc_out": tensor2}) + assert "incompatible function arguments." 
in str(e.value) From c6c7d9b2888301050bda9908af56d5df68ee5469 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 3 Nov 2021 16:50:21 +0300 Subject: [PATCH 05/46] fix get_version --- .../bindings/python/src/pyopenvino/core/containers.cpp | 7 ++----- .../bindings/python/src/pyopenvino/core/containers.hpp | 6 ++++-- runtime/bindings/python/src/pyopenvino/pyopenvino.cpp | 8 ++++---- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.cpp b/runtime/bindings/python/src/pyopenvino/core/containers.cpp index e91765e8f73aec..a9775e0d36c0b0 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -1,11 +1,8 @@ - // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "pyopenvino/core/containers.hpp" - -#include #include PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); @@ -16,10 +13,10 @@ namespace py = pybind11; namespace Containers { void regclass_TensorIndexMap(py::module m) { - auto tensor_index_map = py::bind_map(m, "TensorIndexMap"); + py::bind_map(m, "TensorIndexMap"); } void regclass_TensorNameMap(py::module m) { - auto tensor_name_map = py::bind_map(m, "TensorNameMap"); + py::bind_map(m, "TensorNameMap"); } } // namespace Containers diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp index 30a8fac6440403..1c7d71e7690787 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -4,11 +4,13 @@ #pragma once -#include -#include #include #include +#include + +#include + namespace py = pybind11; namespace Containers { diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index e716063d4ee4ca..f57e36da3a45d1 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -5,8 +5,8 @@ #include #include -#include #include +#include #include #include "pyopenvino/graph/axis_set.hpp" @@ -55,9 +55,9 @@ namespace py = pybind11; std::string get_version() { - auto version = InferenceEngine::GetInferenceEngineVersion(); - std::string version_str = std::to_string(version->apiVersion.major) + "."; - version_str += std::to_string(version->apiVersion.minor) + "."; + auto version = ov::get_openvino_version(); + std::string version_str = std::to_string(OPENVINO_VERSION_MAJOR) + "."; + version_str += std::to_string(OPENVINO_VERSION_MINOR) + "."; version_str += version->buildNumber; return version_str; } From dd6dbc914150c6f6296dbf22e1d1b395de504dc9 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 3 Nov 2021 17:24:35 +0300 Subject: [PATCH 06/46] fix opaque issue --- runtime/bindings/python/src/pyopenvino/core/containers.hpp | 1 + runtime/bindings/python/src/pyopenvino/core/infer_request.cpp | 3 +++ 2 files changed, 4 insertions(+) diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp index 1c7d71e7690787..e24e7336236cac 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -6,6 +6,7 @@ #include #include +#include #include diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp 
b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 190fbe0d07b053..32f97e4c45df1b 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -12,6 +12,9 @@ #include "pyopenvino/core/containers.hpp" +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); + namespace py = pybind11; void regclass_InferRequest(py::module m) { From 573d367c2b9dda3a228c562cffd2e50c55a45e32 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 3 Nov 2021 18:49:25 +0300 Subject: [PATCH 07/46] some cosmetic changes --- .../python/src/pyopenvino/core/core.cpp | 9 - .../tests/test_inference_engine/helpers.py | 35 --- .../tests/test_inference_engine/test_core.py | 67 +++--- .../test_executable_network.py | 199 +++++++++--------- .../test_infer_request.py | 53 +++-- .../test_output_const_node.py | 42 ++-- .../test_inference_engine/test_tensor.py | 14 +- 7 files changed, 201 insertions(+), 218 deletions(-) delete mode 100644 runtime/bindings/python/tests/test_inference_engine/helpers.py diff --git a/runtime/bindings/python/src/pyopenvino/core/core.cpp b/runtime/bindings/python/src/pyopenvino/core/core.cpp index 35c7e54c57c0c5..63d23ca9fc87b6 100644 --- a/runtime/bindings/python/src/pyopenvino/core/core.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/core.cpp @@ -39,15 +39,6 @@ void regclass_Core(py::module m) { py::arg("device_name"), py::arg("config") = py::dict()); - cls.def( - "add_extension", - [](ov::runtime::Core& self, const std::string& extension_path) { - auto extension_ptr = InferenceEngine::make_so_pointer(extension_path); - auto extension = std::dynamic_pointer_cast(extension_ptr); - self.add_extension(extension); - }, - py::arg("extension_path")); - cls.def("get_versions", &ov::runtime::Core::get_versions); cls.def("read_model", diff --git a/runtime/bindings/python/tests/test_inference_engine/helpers.py b/runtime/bindings/python/tests/test_inference_engine/helpers.py deleted file mode 100644 index db48c1e9298da7..00000000000000 --- a/runtime/bindings/python/tests/test_inference_engine/helpers.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import cv2 -import os - - -def image_path(): - path_to_repo = os.environ["DATA_PATH"] - path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") - return path_to_img - - -def model_path(is_myriad=False): - path_to_repo = os.environ["MODELS_PATH"] - if not is_myriad: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") - else: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") - return (test_xml, test_bin) - - -def read_image(): - n, c, h, w = (1, 3, 32, 32) - image = cv2.imread(image_path()) - if image is None: - raise FileNotFoundError("Input image not found") - - image = cv2.resize(image, (h, w)) / 255 - image = image.transpose((2, 0, 1)).astype(np.float32) - image = image.reshape((n, c, h, w)) - return image diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index 7035d5e4dcea75..f53bd853ef5ae1 100644 --- 
a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -10,7 +10,6 @@ import openvino.opset8 as ov from openvino import Core, IENetwork, ExecutableNetwork, tensor_from_file from openvino.impl import Function, Shape, Type -from openvino.impl.op import Parameter from openvino import TensorDesc, Blob from ..conftest import model_path, model_onnx_path, plugins_path @@ -40,7 +39,7 @@ def test_blobs(): @pytest.mark.skip(reason="Fix") -def test_ie_core_class(): +def test_core_class(): input_shape = [1, 3, 4, 4] param = ov.parameter(input_shape, np.float32, name="parameter") relu = ov.relu(param, name="relu") @@ -49,9 +48,9 @@ def test_ie_core_class(): cnn_network = IENetwork(func) - ie_core = Core() - ie_core.set_config({}, device_name="CPU") - executable_network = ie_core.compile_model(cnn_network, "CPU", {}) + core = Core() + core.set_config({}, device_name="CPU") + executable_network = core.compile_model(cnn_network, "CPU", {}) td = TensorDesc("FP32", input_shape, "NCHW") @@ -79,65 +78,65 @@ def test_compile_model(device): assert isinstance(exec_net, ExecutableNetwork) -def test_read_model(): - ie_core = Core() - func = ie_core.read_model(model=test_net_xml, weights=test_net_bin) +def test_read_model_from_ir(): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) assert isinstance(func, Function) - func = ie_core.read_model(model=test_net_xml) + func = core.read_model(model=test_net_xml) assert isinstance(func, Function) -def test_read_model_from_blob(): - ie_core = Core() +def test_read_model_from_tensor(): + core = Core() model = open(test_net_xml).read() tensor = tensor_from_file(test_net_bin) - func = ie_core.read_model(model=model, weights=tensor) + func = core.read_model(model=model, weights=tensor) assert isinstance(func, Function) def test_read_model_as_path(): - ie_core = Core() - func = ie_core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin)) + core = Core() + func = core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin)) assert isinstance(func, Function) - func = ie_core.read_model(model=test_net_xml, weights=Path(test_net_bin)) + func = core.read_model(model=test_net_xml, weights=Path(test_net_bin)) assert isinstance(func, Function) - func = ie_core.read_model(model=Path(test_net_xml)) + func = core.read_model(model=Path(test_net_xml)) assert isinstance(func, Function) def test_read_model_from_onnx(): - ie_core = Core() - func = ie_core.read_model(model=test_net_onnx) + core = Core() + func = core.read_model(model=test_net_onnx) assert isinstance(func, Function) def test_read_model_from_onnx_as_path(): - ie_core = Core() - func = ie_core.read_model(model=Path(test_net_onnx)) + core = Core() + func = core.read_model(model=Path(test_net_onnx)) assert isinstance(func, Function) @pytest.mark.xfail("68212") def test_read_net_from_buffer(): - ie_core = Core() + core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - func = ie_core.read_model(model=xml, weights=bin) + func = core.read_model(model=xml, weights=bin) assert isinstance(func, IENetwork) @pytest.mark.xfail("68212") def test_net_from_buffer_valid(): - ie_core = Core() + core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - func = ie_core.read_model(model=xml, weights=bin) - ref_func = ie_core.read_model(model=test_net_xml, weights=test_net_bin) + func = 
core.read_model(model=xml, weights=bin) + ref_func = core.read_model(model=test_net_xml, weights=test_net_bin) assert func.name == func.name assert func.batch_size == ref_func.batch_size ii_func = func.input_info @@ -252,14 +251,12 @@ def test_register_plugins(): "the registered plugin with name 'CUSTOM' " \ "registred in the XML file" +#@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well") +def test_unregister_plugin(device): + ie = Core() + ie.unload_plugin(device) + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + with pytest.raises(RuntimeError) as e: + ie.load_network(func, device) + assert f"Device with '{device}' name is not registered in the InferenceEngine" in str(e.value) -def test_create_IENetwork_from_nGraph(): - element_type = Type.f32 - param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ov.relu(param) - func = Function([relu], [param], "test") - cnnNetwork = IENetwork(func) - assert cnnNetwork is not None - func2 = cnnNetwork.get_function() - assert func2 is not None - assert len(func2.get_ops()) == 3 diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py index 79ebaf7b5056c3..0a0e26da930c6b 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py @@ -1,9 +1,9 @@ -import numpy as np +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + import os import pytest -import warnings -import time -from pathlib import Path +import numpy as np from ..conftest import model_path, image_path from openvino.impl import Function, ConstOutput, Shape, PartialShape @@ -11,30 +11,13 @@ from openvino import Core is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" -path_to_image = image_path() test_net_xml, test_net_bin = model_path(is_myriad) -def image_path(): - path_to_repo = os.environ["DATA_PATH"] - path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") - return path_to_img - - -def model_path(is_myriad=False): - path_to_repo = os.environ["MODELS_PATH"] - if not is_myriad: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") - else: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") - return (test_xml, test_bin) - def read_image(): import cv2 n, c, h, w = (1, 3, 32, 32) - image = cv2.imread(path_to_img) + image = cv2.imread(image_path()) if image is None: raise FileNotFoundError("Input image not found") @@ -44,19 +27,53 @@ def read_image(): return image +def test_get_metric(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + network_name = exec_net.get_metric("NETWORK_NAME") + assert network_name == "test_model" + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test") +def test_get_config(device): + core = Core() + if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON": + pytest.skip("Can't run on ARM plugin due-to CPU dependent test") + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, 
device) + config = exec_net.get_config("PERF_COUNT") + assert config == "NO" + + def test_get_runtime_function(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) runtime_func = exec_net.get_runtime_function() assert isinstance(runtime_func, Function) +@pytest.mark.skip(reason="After infer will be implemented") +def test_export_import(): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, "CPU") + exported_net_file = 'exported_model.bin' + exec_net.export_model(network_model=exported_net_file) + assert os.path.exists(exported_net_file) + exec_net = core.import_network(exported_net_file, "CPU") + os.remove(exported_net_file) + img = read_image() + res = exec_net.infer({'data': img}) + assert np.argmax(res['fc_out'][0]) == 3 + del exec_net + del core + def test_get_input_i(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - input = exec_net.input(0); + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input(0) input_node = input.get_node() name = input_node.friendly_name assert isinstance(input, ConstOutput) @@ -64,10 +81,10 @@ def test_get_input_i(device): def test_get_input_tensor_name(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - input = exec_net.input("data"); + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input("data") input_node = input.get_node() name = input_node.friendly_name assert isinstance(input, ConstOutput) @@ -75,10 +92,10 @@ def test_get_input_tensor_name(device): def test_get_input(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - input = exec_net.input(); + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input() input_node = input.get_node() name = input_node.friendly_name assert isinstance(input, ConstOutput) @@ -86,27 +103,26 @@ def test_get_input(device): def test_get_output_i(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - output = exec_net.output(0); + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) assert isinstance(output, ConstOutput) def test_get_output(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - output = exec_net.output(); - output_node = output.get_node() + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output() assert isinstance(output, ConstOutput) def test_input_set_friendly_name(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - input = exec_net.input("data"); + core = Core() + func = 
core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input("data") input_node = input.get_node() input_node.set_friendly_name("input_1") name = input_node.friendly_name @@ -115,10 +131,10 @@ def test_input_set_friendly_name(device): def test_output_set_friendly_name(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - output = exec_net.output(0); + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) output_node = output.get_node() output_node.set_friendly_name("output_1") name = output_node.friendly_name @@ -127,78 +143,70 @@ def test_output_set_friendly_name(device): def test_outputs(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) outputs = exec_net.outputs assert isinstance(outputs, list) assert len(outputs) == 1 def test_outputs_items(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) outputs = exec_net.outputs assert isinstance(outputs[0], ConstOutput) def test_output_type(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) output = exec_net.output(0) output_type = output.get_element_type().get_type_name() assert output_type == "f32" + def test_output_shape(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) output = exec_net.output(0) expected_shape = Shape([1, 10]) assert str(output.get_shape()) == str(expected_shape) def test_input_get_index(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) input = exec_net.input(0) expected_idx = 0 assert input.get_index() == expected_idx - - -def test_input_get_index(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) - input = exec_net.input(0) - expected_partial_shape = PartialShape([1, 3, 32 ,32]) - assert input.get_partial_shape() == expected_partial_shape def test_inputs(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) inputs = exec_net.inputs assert isinstance(inputs, list) assert len(inputs) == 1 def test_inputs_items(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = 
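# A sketch of the ConstOutput introspection pattern these tests rely on
# (hypothetical helper; assumes exec_net was produced by Core.compile_model):
def _describe_ports(exec_net):
    for port in exec_net.inputs + exec_net.outputs:
        node = port.get_node()
        # a ConstOutput answers the same queries as a mutable Output
        print(node.friendly_name, port.get_element_type(), port.get_partial_shape())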
core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) inputs = exec_net.inputs assert isinstance(inputs[0], ConstOutput) def test_inputs_get_friendly_name(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) inputs = exec_net.inputs input_0 = inputs[0] node = input_0.get_node() @@ -207,9 +215,9 @@ def test_inputs_get_friendly_name(device): def test_inputs_set_friendly_name(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) inputs = exec_net.inputs input_0 = inputs[0] node = input_0.get_node() @@ -219,11 +227,10 @@ def test_inputs_set_friendly_name(device): def test_inputs_docs(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) inputs = exec_net.inputs input_0 = inputs[0] - exptected_string = "openvino.impl.ConstOutput wraps ov::Output" - assert input_0.__doc__ == exptected_string - + expected_string = "openvino.impl.ConstOutput wraps ov::Output" + assert input_0.__doc__ == expected_string diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 1b2cac4a627881..23718afa1a479d 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -5,20 +5,31 @@ import os import pytest -from tests.test_inference_engine.helpers import model_path, read_image +from ..conftest import image_path, model_path from openvino import Core, Tensor - is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) +def read_image(): + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(image_path()) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + @pytest.mark.skip(reason="ProfilingInfo has to be bound") def test_get_profiling_info(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - ie_core.set_config({"PERF_COUNT": "YES"}, device) - exec_net = ie_core.compile_model(func, device) + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = core.compile_model(func, device) img = read_image() request = exec_net.create_infer_request() request.infer({0: img}) @@ -27,14 +38,14 @@ def test_get_profiling_info(device): assert pc["29"]["status"] == "EXECUTED" assert pc["29"]["layer_type"] == "FullyConnected" del exec_net - del ie_core + del core def test_tensor_setter(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - exec_net_1 = ie_core.compile_model(network=func, device_name=device) - exec_net_2 = ie_core.compile_model(network=func, device_name=device) + core = Core() + func = 
core.read_model(test_net_xml, test_net_bin) + exec_net_1 = core.compile_model(network=func, device_name=device) + exec_net_2 = core.compile_model(network=func, device_name=device) img = read_image() tensor = Tensor(img) @@ -61,9 +72,9 @@ def test_tensor_setter(device): def test_set_tensors(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - exec_net = ie_core.compile_model(func, device) + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) data1 = read_image() tensor1 = Tensor(data1) @@ -114,9 +125,9 @@ def test_set_tensors(device): def test_cancel(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - exec_net = ie_core.compile_model(func, device) + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) img = read_image() request = exec_net.create_infer_request() @@ -138,10 +149,10 @@ def callback(e): def test_infer_mixed_keys(device): - ie_core = Core() - func = ie_core.read_model(test_net_xml, test_net_bin) - ie_core.set_config({"PERF_COUNT": "YES"}, device) - exec_net = ie_core.compile_model(func, device) + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = core.compile_model(func, device) img = read_image() tensor = Tensor(img) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py index 522e516a7b0a8e..c0da75bcc4fd0a 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py @@ -24,60 +24,60 @@ def model_path(is_myriad=False): def test_const_output_type(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input(0) assert isinstance(node, ConstOutput) def test_const_output_docs(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input(0) exptected_string = "openvino.impl.ConstOutput wraps ov::Output" assert node.__doc__ == exptected_string def test_const_output_get_index(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input("data") assert node.get_index() == 0 def test_const_output_get_element_type(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input("data") assert node.get_element_type() == Type.f32 def test_const_output_get_shape(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = 
core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input("data") expected_shape = Shape([1, 3, 32, 32]) assert str(node.get_shape()) == str(expected_shape) def test_const_output_get_partial_shape(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) node = exec_net.input("data") expected_partial_shape = PartialShape([1, 3, 32, 32]) assert node.get_partial_shape() == expected_partial_shape def test_const_output_get_target_inputs(device): - ie = Core() - func = ie.read_model(model=test_net_xml, weights=test_net_bin) - exec_net = ie.compile_model(func, device) + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) outputs = exec_net.outputs for node in outputs: assert isinstance(node.get_target_inputs(), set) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py index d840bfa71f4a34..a46dc1fededd82 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py @@ -4,11 +4,23 @@ import numpy as np import pytest -from tests.test_inference_engine.helpers import read_image +from ..conftest import image_path from openvino import Tensor import openvino as ov +def read_image(): + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(image_path()) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + @pytest.mark.parametrize("ov_type, numpy_dtype", [ (ov.impl.Type.f32, np.float32), (ov.impl.Type.f64, np.float64), From 9c06d10085e044822ca442abb782dc3ae0805d28 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 3 Nov 2021 21:08:17 +0300 Subject: [PATCH 08/46] fix codestyle in tests --- .../python/src/pyopenvino/core/containers.cpp | 1 + .../bindings/python/src/pyopenvino/pyopenvino.cpp | 3 +-- runtime/bindings/python/tests/runtime.py | 3 ++- .../python/tests/test_inference_engine/test_core.py | 10 ++++++---- .../test_inference_engine/test_executable_network.py | 12 +++++++----- .../test_inference_engine/test_infer_request.py | 1 + .../test_inference_engine/test_output_const_node.py | 1 - .../tests/test_inference_engine/test_tensor.py | 2 ++ 8 files changed, 20 insertions(+), 13 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.cpp b/runtime/bindings/python/src/pyopenvino/core/containers.cpp index a9775e0d36c0b0..12a50b1caf5331 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -3,6 +3,7 @@ // #include "pyopenvino/core/containers.hpp" + #include PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index f57e36da3a45d1..5f9e66c064d89f 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -31,9 +31,8 @@ #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" 
#include "pyopenvino/core/ie_preprocess_info.hpp" -#include "pyopenvino/core/offline_transformations.hpp" -#include "pyopenvino/core/version.hpp" #include "pyopenvino/core/infer_request.hpp" +#include "pyopenvino/core/offline_transformations.hpp" #include "pyopenvino/core/tensor.hpp" #include "pyopenvino/core/tensor_description.hpp" #include "pyopenvino/core/version.hpp" diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index 2b79cf3af5af50..15dc3fbf9f57e0 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -7,7 +7,8 @@ from typing import Dict, List, Union import numpy as np -from openvino import Core, IENetwork, Blob, DataPtr + +from openvino import Core, Blob from openvino.exceptions import UserInputError from openvino.impl import Function, Node, PartialShape, Type diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index f53bd853ef5ae1..3ff6ddac7c3477 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -9,7 +9,7 @@ import openvino.opset8 as ov from openvino import Core, IENetwork, ExecutableNetwork, tensor_from_file -from openvino.impl import Function, Shape, Type +from openvino.impl import Function from openvino import TensorDesc, Blob from ..conftest import model_path, model_onnx_path, plugins_path @@ -118,6 +118,7 @@ def test_read_model_from_onnx_as_path(): func = core.read_model(model=Path(test_net_onnx)) assert isinstance(func, Function) + @pytest.mark.xfail("68212") def test_read_net_from_buffer(): core = Core() @@ -128,6 +129,7 @@ def test_read_net_from_buffer(): func = core.read_model(model=xml, weights=bin) assert isinstance(func, IENetwork) + @pytest.mark.xfail("68212") def test_net_from_buffer_valid(): core = Core() @@ -249,9 +251,10 @@ def test_register_plugins(): assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to " \ "the registered plugin with name 'CUSTOM' " \ - "registred in the XML file" + "registered in the XML file" -#@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well") + +# @pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well") def test_unregister_plugin(device): ie = Core() ie.unload_plugin(device) @@ -259,4 +262,3 @@ def test_unregister_plugin(device): with pytest.raises(RuntimeError) as e: ie.load_network(func, device) assert f"Device with '{device}' name is not registered in the InferenceEngine" in str(e.value) - diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py index 0a0e26da930c6b..ec1eaf2f8112b9 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py @@ -6,7 +6,7 @@ import numpy as np from ..conftest import model_path, image_path -from openvino.impl import Function, ConstOutput, Shape, PartialShape +from openvino.impl import Function, ConstOutput, Shape from openvino import Core @@ -34,6 +34,7 @@ def test_get_metric(device): network_name = exec_net.get_metric("NETWORK_NAME") assert network_name == "test_model" + @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test") def 
test_get_config(device): core = Core() @@ -52,19 +53,20 @@ def test_get_runtime_function(device): runtime_func = exec_net.get_runtime_function() assert isinstance(runtime_func, Function) + @pytest.mark.skip(reason="After infer will be implemented") def test_export_import(): core = Core() func = core.read_model(model=test_net_xml, weights=test_net_bin) exec_net = core.compile_model(func, "CPU") - exported_net_file = 'exported_model.bin' + exported_net_file = "exported_model.bin" exec_net.export_model(network_model=exported_net_file) assert os.path.exists(exported_net_file) exec_net = core.import_network(exported_net_file, "CPU") os.remove(exported_net_file) img = read_image() - res = exec_net.infer({'data': img}) - assert np.argmax(res['fc_out'][0]) == 3 + res = exec_net.infer({"data": img}) + assert np.argmax(res["fc_out"][0]) == 3 del exec_net del core @@ -184,7 +186,7 @@ def test_input_get_index(device): input = exec_net.input(0) expected_idx = 0 assert input.get_index() == expected_idx - + def test_inputs(device): core = Core() diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 23718afa1a479d..9413dcf3a77c6e 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -13,6 +13,7 @@ def read_image(): + import cv2 n, c, h, w = (1, 3, 32, 32) image = cv2.imread(image_path()) if image is None: diff --git a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py index c0da75bcc4fd0a..d7d1a2eea68a36 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py @@ -81,4 +81,3 @@ def test_const_output_get_target_inputs(device): outputs = exec_net.outputs for node in outputs: assert isinstance(node.get_target_inputs(), set) - diff --git a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py index a46dc1fededd82..081334013a512b 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py @@ -10,6 +10,8 @@ def read_image(): + import cv2 + n, c, h, w = (1, 3, 32, 32) image = cv2.imread(image_path()) if image is None: From 5b2b9a0fbc206e4d21166af6753e9711c2db9b94 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Thu, 4 Nov 2021 00:48:25 +0300 Subject: [PATCH 09/46] make tests green --- .../python/src/pyopenvino/core/common.cpp | 42 ++++++++ .../python/src/pyopenvino/core/common.hpp | 8 ++ .../src/pyopenvino/core/infer_request.cpp | 97 +++++++------------ .../tests/test_inference_engine/test_core.py | 2 +- .../test_infer_request.py | 2 +- 5 files changed, 89 insertions(+), 62 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/common.cpp b/runtime/bindings/python/src/pyopenvino/core/common.cpp index 098a85f2ffc95a..9350640917c6da 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.cpp @@ -211,6 +211,48 @@ bool is_TBlob(const py::handle& blob) { } } +const ov::runtime::Tensor& cast_to_tensor(const py::handle& tensor) { + return tensor.cast(); +} + +const Containers::TensorNameMap 
cast_to_tensor_name_map(const py::dict& inputs) { + Containers::TensorNameMap result_map; + for (auto&& input : inputs) { + std::string name; + if (py::isinstance(input.first)) { + name = input.first.cast(); + } else { + throw py::type_error("incompatible function arguments!"); + } + if (py::isinstance(input.second)) { + auto tensor = Common::cast_to_tensor(input.second); + result_map[name] = tensor; + } else { + throw ov::Exception("Unable to cast tensor " + name + "!"); + } + } + return result_map; +} + +const Containers::TensorIndexMap cast_to_tensor_index_map(const py::dict& inputs) { + Containers::TensorIndexMap result_map; + for (auto&& input : inputs) { + int idx; + if (py::isinstance(input.first)) { + idx = input.first.cast(); + } else { + throw py::type_error("incompatible function arguments!"); + } + if (py::isinstance(input.second)) { + auto tensor = Common::cast_to_tensor(input.second); + result_map[idx] = tensor; + } else { + throw ov::Exception("Unable to cast tensor " + std::to_string(idx) + "!"); + } + } + return result_map; +} + const std::shared_ptr cast_to_blob(const py::handle& blob) { if (py::isinstance>(blob)) { return blob.cast>&>(); diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp index f9ca68fdabc982..fb7a47e6be15af 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -14,6 +14,8 @@ #include #include "Python.h" #include "ie_common.h" +#include "openvino/runtime/tensor.hpp" +#include "pyopenvino/core/containers.hpp" namespace py = pybind11; @@ -48,6 +50,12 @@ namespace Common const std::shared_ptr cast_to_blob(const py::handle& blob); + const Containers::TensorNameMap cast_to_tensor_name_map(const py::dict& inputs); + + const Containers::TensorIndexMap cast_to_tensor_index_map(const py::dict& inputs); + + const ov::runtime::Tensor& cast_to_tensor(const py::handle& tensor); + void blob_from_numpy(const py::handle& _arr, InferenceEngine::Blob::Ptr &blob); void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 32f97e4c45df1b..75c6d51b40476e 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -10,6 +10,7 @@ #include +#include "pyopenvino/core/common.hpp" #include "pyopenvino/core/containers.hpp" PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); @@ -21,8 +22,9 @@ void regclass_InferRequest(py::module m) { py::class_> cls(m, "InferRequest"); cls.def( "set_tensors", - [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { - for (auto&& input : inputs) { + [](InferRequestWrapper& self, const py::dict& inputs) { + auto tensor_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : tensor_map) { self._request.set_tensor(input.first, input.second); } }, @@ -30,8 +32,9 @@ void regclass_InferRequest(py::module m) { cls.def( "set_output_tensors", - [](InferRequestWrapper& self, const Containers::TensorIndexMap& outputs) { - for (auto&& output : outputs) { + [](InferRequestWrapper& self, const py::dict& outputs) { + auto outputs_map = Common::cast_to_tensor_index_map(outputs); + for (auto&& output : outputs_map) { self._request.set_output_tensor(output.first, output.second); } }, @@ -39,8 +42,9 @@ void 
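// The cast_to_tensor_*_map helpers above centralize py::dict conversion so
// that the InferRequest bindings can take a plain Python dict and dispatch
// on the key type: str keys resolve tensors by name (set_tensor), int keys
// by port index (set_input_tensor); any other key type raises the
// "incompatible function arguments!" TypeError that test_infer_mixed_keys
// asserts on.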
regclass_InferRequest(py::module m) { cls.def( "set_input_tensors", - [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { - for (auto&& input : inputs) { + [](InferRequestWrapper& self, const py::dict& inputs) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { self._request.set_input_tensor(input.first, input.second); } }, @@ -48,29 +52,20 @@ void regclass_InferRequest(py::module m) { cls.def( "_infer", - [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { + [](InferRequestWrapper& self, const py::dict& inputs) { // Update inputs if there are any - for (auto&& input : inputs) { - self._request.set_input_tensor(input.first, input.second); - } - // Call Infer function - self._start_time = Time::now(); - self._request.infer(); - self._end_time = Time::now(); - Containers::InferResults results; - for (auto& out : self._outputs) { - results.push_back(self._request.get_tensor(out)); - } - return results; - }, - py::arg("inputs")); - - cls.def( - "_infer", - [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { - // Update inputs if there are any - for (auto&& input : inputs) { - self._request.set_tensor(input.first, input.second); + if (!inputs.empty()) { + if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_tensor(input.first, input.second); + } + } else if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_input_tensor(input.first, input.second); + } + } } // Call Infer function self._start_time = Time::now(); @@ -86,40 +81,22 @@ void regclass_InferRequest(py::module m) { cls.def( "_start_async", - [](InferRequestWrapper& self, const Containers::TensorIndexMap& inputs) { + [](InferRequestWrapper& self, const py::dict& inputs) { py::gil_scoped_release release; - for (auto&& input : inputs) { - self._request.set_input_tensor(input.first, input.second); - } - // TODO: check for None so next async infer userdata can be updated - // if (!userdata.empty()) - // { - // if (user_callback_defined) - // { - // self._request.SetCompletionCallback([self, userdata]() { - // // py::gil_scoped_acquire acquire; - // auto statusCode = const_cast(self).Wait( - // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - // self._request.user_callback(self, statusCode, userdata); - // // py::gil_scoped_release release; - // }); - // } - // else - // { - // py::print("There is no callback function!"); - // } - // } - self._start_time = Time::now(); - self._request.start_async(); - }, - py::arg("inputs")); - cls.def( - "_start_async", - [](InferRequestWrapper& self, const Containers::TensorNameMap& inputs) { - py::gil_scoped_release release; - for (auto&& input : inputs) { - self._request.set_tensor(input.first, input.second); + // Update inputs if there are any + if (!inputs.empty()) { + if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_tensor(input.first, input.second); + } + } else if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_input_tensor(input.first, input.second); + } + } } // TODO: check for None so next async infer userdata can be updated // if 
(!userdata.empty()) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index 3ff6ddac7c3477..b098e5056aff09 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -254,7 +254,7 @@ def test_register_plugins(): "registered in the XML file" -# @pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well") +@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well") def test_unregister_plugin(device): ie = Core() ie.unload_plugin(device) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 9413dcf3a77c6e..a33c3d599b33c7 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -164,4 +164,4 @@ def test_infer_mixed_keys(device): request = exec_net.create_infer_request() with pytest.raises(TypeError) as e: request.infer({0: tensor, "fc_out": tensor2}) - assert "incompatible function arguments." in str(e.value) + assert "incompatible function arguments!" in str(e.value) From 5fe62ee391a1d458cd3413a7803a35aae7b6a2da Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:32:59 +0000 Subject: [PATCH 10/46] Extend python InferRequest --- .../src/pyopenvino/core/infer_request.cpp | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 75c6d51b40476e..f3885bcdbfa633 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -178,6 +178,28 @@ void regclass_InferRequest(py::module m) { }, py::arg("port")); + cls.def( + "get_input_tensor", + [](InferRequestWrapper& self, size_t idx) { + return self._request.get_input_tensor(idx); + }, + py::arg("idx")); + + cls.def("get_input_tensor", [](InferRequestWrapper& self) { + return self._request.get_input_tensor(); + }); + + cls.def( + "get_output_tensor", + [](InferRequestWrapper& self, size_t idx) { + return self._request.get_output_tensor(idx); + }, + py::arg("idx")); + + cls.def("get_output_tensor", [](InferRequestWrapper& self) { + return self._request.get_output_tensor(); + }); + cls.def( "set_tensor", [](InferRequestWrapper& self, const std::string& name, const ov::runtime::Tensor& tensor) { @@ -232,14 +254,30 @@ void regclass_InferRequest(py::module m) { }, py::arg("tensor")); - cls.def_property_readonly("input_tensors", [](InferRequestWrapper& self) { + cls.def_property_readonly("inputs", [](InferRequestWrapper& self) { return self._inputs; }); - cls.def_property_readonly("output_tensors", [](InferRequestWrapper& self) { + cls.def_property_readonly("outputs", [](InferRequestWrapper& self) { return self._outputs; }); + cls.def_property_readonly("input_tensors", [](InferRequestWrapper& self) { + std::vector tensors; + for (auto&& node : self._inputs) { + tensors.push_back(self._request.get_tensor(node)); + } + return tensors; + }); + + cls.def_property_readonly("output_tensors", [](InferRequestWrapper& self) { + std::vector tensors; + for (auto&& node : self._outputs) { + 
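            // get_tensor() snapshots the tensor currently bound to each output
            // node, so the returned list mirrors the order of self._outputs.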
tensors.push_back(self._request.get_tensor(node)); + } + return tensors; + }); + cls.def_property_readonly("latency", [](InferRequestWrapper& self) { return self.get_latency(); }); From 6998cbde37b4bf66aa236d1caf7dc3c580d0a6b9 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:35:18 +0000 Subject: [PATCH 11/46] Extend python Function --- .../python/src/pyopenvino/graph/function.cpp | 68 ++++++++++++++++--- 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/graph/function.cpp b/runtime/bindings/python/src/pyopenvino/graph/function.cpp index 3496e88d0149dd..37afccb684e159 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/function.cpp +++ b/runtime/bindings/python/src/pyopenvino/graph/function.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/core/partial_shape.hpp" #include "openvino/op/parameter.hpp" // ov::op::v0::Parameter #include "openvino/op/sink.hpp" #include "pyopenvino/graph/function.hpp" @@ -15,6 +16,16 @@ namespace py = pybind11; static const char* CAPSULE_NAME = "ngraph_function"; +void set_tensor_names(const ov::ParameterVector& parameters) { + for (const auto& param : parameters) { + ov::Output p = param; + if (p.get_node()->output(0).get_names().empty()) { + std::unordered_set p_names({p.get_node()->get_friendly_name()}); + p.get_node()->output(0).set_names(p_names); + } + } +} + void regclass_graph_Function(py::module m) { py::class_> function(m, "Function", py::module_local()); function.doc() = "openvino.impl.Function wraps ov::Function"; @@ -53,12 +64,15 @@ void regclass_graph_Function(py::module m) { String to set as function's friendly name. )"); - function.def(py::init>&, - const std::vector>&, - const std::string&>(), + function.def(py::init([](const std::vector>& results, + const ov::ParameterVector& parameters, + const std::string& name) { + set_tensor_names(parameters); + return std::make_shared(results, parameters, name); + }), py::arg("results"), py::arg("parameters"), - py::arg("name"), + py::arg("name") = "", R"( Create user-defined Function which is a representation of a model. @@ -74,12 +88,15 @@ void regclass_graph_Function(py::module m) { String to set as function's friendly name. )"); - function.def(py::init&, - const std::vector>&, - const std::string&>(), + function.def(py::init([](const std::shared_ptr& results, + const ov::ParameterVector& parameters, + const std::string& name) { + set_tensor_names(parameters); + return std::make_shared(results, parameters, name); + }), py::arg("result"), py::arg("parameters"), - py::arg("name"), + py::arg("name") = "", R"( Create user-defined Function which is a representation of a model. @@ -94,6 +111,41 @@ void regclass_graph_Function(py::module m) { name : str String to set as function's friendly name. )"); + + function.def( + "reshape", + [](ov::Function& self, const std::map& partial_shapes) { + self.reshape(partial_shapes); + }, + py::arg("partial_shapes"), + R"( + Parameters + ---------- + partial_shapes : Dict[string, PartialShape] + Index of Output. + + Returns + ---------- + reshape : void + )"); + + function.def( + "reshape", + [](ov::Function& self, const std::map, ov::PartialShape>& partial_shapes) { + self.reshape(partial_shapes); + }, + py::arg("partial_shapes"), + R"( + Parameters + ---------- + partial_shapes : Dict[Output, PartialShape] + Index of Output. 
+ + Returns + ---------- + reshape : void + )"); + function.def("get_output_size", &ov::Function::get_output_size, R"( From c541d2d632e784c37679ae63fe7a02c557b6dd84 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:36:10 +0000 Subject: [PATCH 12/46] Change return value of infer call --- runtime/bindings/python/src/openvino/ie_api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index 7a208a0fc926ff..3d15dc52573f3b 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -43,7 +43,9 @@ def normalize_inputs(py_dict: dict) -> dict: # flake8: noqa: D102 def infer(request: InferRequest, inputs: dict = None) -> np.ndarray: res = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {})) - return np.asarray([copy.deepcopy(tensor.data) for tensor in res]) + # Required to return list since np.ndarray forces all of tensors data to match in + # dimensions. This results in errors when running ops like variadic split. + return [copy.deepcopy(tensor.data) for tensor in res] # flake8: noqa: D102 def start_async(request: InferRequest, inputs: dict = None) -> None: # type: ignore From d83f73f77e7abdec59b1d23239606ba7ac7a00e9 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:37:46 +0000 Subject: [PATCH 13/46] Fix missing precisions conversions in CPU plugin --- inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp | 8 +++++--- .../src/mkldnn_plugin/nodes/common/cpu_convert.cpp | 8 +++++++- inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp | 5 +++++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp index 37eff38dbc0ec5..f2400105262f34 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp @@ -134,7 +134,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr manager.register_pass( std::vector{ ngraph::element::i8, ngraph::element::u8, ngraph::element::i4, ngraph::element::u4 }); } - + // BRUH auto get_convert_precisions = []() { precisions_array array = { {ngraph::element::i64, ngraph::element::i32}, @@ -440,9 +440,11 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std // verification of supported input InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); for (const auto &ii : _networkInputs) { - auto input_precision = ii.second->getPrecision(); - if (input_precision != InferenceEngine::Precision::FP32 && + auto input_precision = ii.second->getPrecision(); // BRUH + if (input_precision != InferenceEngine::Precision::FP64 && + input_precision != InferenceEngine::Precision::FP32 && input_precision != InferenceEngine::Precision::I32 && + input_precision != InferenceEngine::Precision::U32 && input_precision != InferenceEngine::Precision::U16 && input_precision != InferenceEngine::Precision::I16 && input_precision != InferenceEngine::Precision::I8 && diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp index 5b47b476b0a565..8763b551af9597 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp @@ -103,7 +103,13 @@ void cpu_convert(const void *srcPtr, void 
*dstPtr, Precision srcPrc, Precision d MKLDNN_CVT(BF16, I64), MKLDNN_CVT(BF16, FP32), MKLDNN_CVT(BF16, BOOL), MKLDNN_CVT(BOOL, U8), MKLDNN_CVT(BOOL, I8), MKLDNN_CVT(BOOL, U16), MKLDNN_CVT(BOOL, I16), MKLDNN_CVT(BOOL, I32), MKLDNN_CVT(BOOL, U64), - MKLDNN_CVT(BOOL, I64), MKLDNN_CVT(BOOL, FP32), MKLDNN_CVT(BOOL, BF16)); + MKLDNN_CVT(BOOL, I64), MKLDNN_CVT(BOOL, FP32), MKLDNN_CVT(BOOL, BF16), + MKLDNN_CVT(FP64, U8), MKLDNN_CVT(FP64, I8), MKLDNN_CVT(FP64, U16), + MKLDNN_CVT(FP64, I16), MKLDNN_CVT(FP64, I32), MKLDNN_CVT(FP64, U64), + MKLDNN_CVT(FP64, I64), MKLDNN_CVT(FP64, FP32), MKLDNN_CVT(FP64, BF16), MKLDNN_CVT(FP64, BOOL), + MKLDNN_CVT(U32, U8), MKLDNN_CVT(U32, I8), MKLDNN_CVT(U32, U16), + MKLDNN_CVT(U32, I16), MKLDNN_CVT(U32, I32), MKLDNN_CVT(U32, U64), + MKLDNN_CVT(U32, I64), MKLDNN_CVT(U32, FP32), MKLDNN_CVT(U32, BF16), MKLDNN_CVT(U32, BOOL)); if (!ctx.converted) IE_THROW() << "cpu_convert can't convert from: " << srcPrc << " precision to: " << dstPrc; diff --git a/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp b/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp index 14fd043dce0824..acc456f8ba28bf 100644 --- a/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp +++ b/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp @@ -72,12 +72,17 @@ inline InferenceEngine::Precision normalizeToSupportedPrecision(InferenceEngine: case InferenceEngine::Precision::FP32: { break; } + case InferenceEngine::Precision::FP64: { + precision = InferenceEngine::Precision::FP32; + break; + } case InferenceEngine::Precision::BOOL: { precision = InferenceEngine::Precision::U8; break; } case InferenceEngine::Precision::U16: case InferenceEngine::Precision::I16: + case InferenceEngine::Precision::U32: case InferenceEngine::Precision::I64: case InferenceEngine::Precision::U64: { precision = InferenceEngine::Precision::I32; From 3ddd26477222fd1b3da3c202779595e97a80d64f Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:43:03 +0000 Subject: [PATCH 14/46] Rework of runtime for new tests --- runtime/bindings/python/tests/runtime.py | 63 +++++++----------------- 1 file changed, 19 insertions(+), 44 deletions(-) diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index 15dc3fbf9f57e0..43bfbb3df07841 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -33,18 +33,6 @@ def get_runtime(): return runtime() -def _convert_val(val): - """WA converts unsupported input values.""" - if type(val) is np.ndarray: - if val.dtype == np.float64: - return np.array(val, dtype=np.float32) - elif val.dtype == np.int64: - return np.array(val, dtype=np.int32) - return np.array(val) - - return np.array(val, dtype=np.float32) - - class Runtime(object): """Represents an nGraph runtime environment.""" @@ -93,24 +81,6 @@ def __repr__(self) -> str: params_string = ", ".join([param.name for param in self.parameters]) return "".format(self.function.get_name(), params_string) - def _get_ie_output_blob_name(self, outputs: Dict, ng_result: result) -> str: - if len(self.results) == 1: - return next(iter(outputs.keys())) - else: - prev_layer = ng_result.input(0).get_source_output() - out_name = prev_layer.get_node().get_friendly_name() - if prev_layer.get_node().get_output_size() != 1: - out_name += "." 
+ str(prev_layer.get_index()) - return out_name - - def _get_ie_output_blob_buffer(self, output_blobs: Dict[str, Blob], ng_result: result) -> np.ndarray: - out_name = self._get_ie_output_blob_name(output_blobs, ng_result) - out_blob = output_blobs[out_name] - - if out_blob.tensor_desc.layout == "SCALAR": - return out_blob.buffer.reshape(()) - else: - return out_blob.buffer def convert_buffers(self, source_buffers, target_dtypes): converted_buffers = [] @@ -130,19 +100,21 @@ def __call__(self, *input_values: NumericData) -> List[NumericData]: raise UserInputError( "Expected %s params, received not enough %s values.", len(self.parameters), len(input_values) ) - # ignore not needed input values - input_values = input_values[:len(self.parameters)] - - input_values = [_convert_val(input_value) for input_value in input_values] - input_shapes = [get_shape(input_value) for input_value in input_values] + param_types = [param.get_element_type() for param in self.parameters] param_names = [param.friendly_name for param in self.parameters] + # ignore not needed input values + input_values = [ + np.array(input_value[0], dtype=get_dtype(input_value[1])) + for input_value in zip(input_values[: len(self.parameters)], param_types) + ] + input_shapes = [get_shape(input_value) for input_value in input_values] + if self.network_cache.get(str(input_shapes)) is None: function = self.function if self.function.is_dynamic(): - function.reshape(dict(zip(param_names, input_shapes))) - # Convert unsupported inputs of the network + function.reshape(dict(zip(param_names, [PartialShape(i) for i in input_shapes]))) self.network_cache[str(input_shapes)] = function else: function = self.network_cache[str(input_shapes)] @@ -160,13 +132,16 @@ def __call__(self, *input_values: NumericData) -> List[NumericData]: ) request = executable_network.create_infer_request() - request.infer(dict(zip(param_names, input_values))) - - # Set order of output blobs compatible with nG Function - result_buffers = [self._get_ie_output_blob_buffer(request.output_blobs, result) - for result in self.results] - - # Since OV overwrite result data type we have to convert results to the original one. + result_buffers = request.infer(dict(zip(param_names, input_values))) + # # Note: other methods to get result_buffers from request + # # First call infer with no return value: + # request.infer(dict(zip(param_names, input_values))) + # # Now use any of following options: + # result_buffers = [request.get_tensor(n).data for n in request.outputs] + # result_buffers = [request.get_output_tensor(i).data for i in range(len(request.outputs))] + # result_buffers = [t.data for t in request.output_tensors] + + # # Since OV overwrite result data type we have to convert results to the original one. 
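        # The CPU plugin normalizes unsupported precisions internally (e.g. FP64
        # parameters execute as FP32, per normalizeToSupportedPrecision in the
        # plugin change above), so each buffer is cast back to the dtype declared
        # by its Result node before being returned.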
original_dtypes = [get_dtype(result.get_output_element_type(0)) for result in self.results] converted_buffers = self.convert_buffers(result_buffers, original_dtypes) return converted_buffers From 0c6a5192ca0f96c19d5393ebee6f4f9e4603e764 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:46:56 +0000 Subject: [PATCH 15/46] Fixed onnx reading in python tests --- runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py index 7c7c7f2c7d8654..570c82619eda03 100644 --- a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py +++ b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py @@ -22,6 +22,6 @@ def import_onnx_model(model: onnx.ModelProto) -> Function: onnx.checker.check_model(model) model_byte_string = model.SerializeToString() ie = Core() - func = ie.read_model(bytes(model_byte_string), Tensor(np.array([], dtype=np.uint8))) + func = ie.read_model(bytes(model_byte_string), Tensor(type=np.uint8, shape=[])) return func From 127f0b916d0098999925c8021a77a6420c636aa2 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:48:02 +0000 Subject: [PATCH 16/46] Edit compatibility tests --- runtime/bindings/python/tests_compatibility/__init__.py | 2 -- .../python/tests_compatibility/test_onnx/test_backend.py | 5 ----- 2 files changed, 7 deletions(-) diff --git a/runtime/bindings/python/tests_compatibility/__init__.py b/runtime/bindings/python/tests_compatibility/__init__.py index 6c7f8d61f34cc1..4def045abc2b49 100644 --- a/runtime/bindings/python/tests_compatibility/__init__.py +++ b/runtime/bindings/python/tests_compatibility/__init__.py @@ -114,8 +114,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported." " z should be converted to TileIE operation.") -xfail_issue_39659 = xfail_test(reason="RuntimeError: Broadcast operation has a form that is not supported." 
- " y should be converted to Tile operation.") xfail_issue_39662 = xfail_test(reason="RuntimeError: 'ScatterElementsUpdate' layer with name 'y' have " "indices value that points to non-existing output tensor element") diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py index fe34d4cbf06cfa..cb51387ef5ce26 100644 --- a/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -28,7 +28,6 @@ xfail_issue_38734, xfail_issue_38735, xfail_issue_39658, - xfail_issue_39659, xfail_issue_39662, xfail_issue_44854, xfail_issue_44858, @@ -128,10 +127,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_tile_cpu", "OnnxBackendNodeModelTest.test_tile_precomputed_cpu", ), - ( - xfail_issue_39659, - "OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu", - ), ( xfail_issue_39662, "OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu", From 695104e422e38689e6f0a7b0cf2399d763f16ae1 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 01:53:03 +0000 Subject: [PATCH 17/46] Edit tests --- runtime/bindings/python/tests/__init__.py | 3 -- runtime/bindings/python/tests/runtime.py | 4 +- .../test_infer_request.py | 8 +-- .../python/tests/test_ngraph/test_basic.py | 8 ++- .../tests/test_ngraph/test_data_movement.py | 3 -- .../python/tests/test_ngraph/test_ops.py | 2 - .../tests/test_ngraph/test_ops_binary.py | 4 -- .../tests/test_ngraph/test_ops_unary.py | 3 -- .../tests/test_ngraph/test_reduction.py | 3 -- .../test_ngraph/test_sequence_processing.py | 4 +- .../python/tests/test_onnx/test_backend.py | 51 ------------------- .../tests/test_onnx/test_onnx_import.py | 18 ++++++- .../tests/test_onnx/test_ops_logical.py | 9 ++-- .../python/tests/test_onnx/test_ops_unary.py | 7 +-- 14 files changed, 30 insertions(+), 97 deletions(-) diff --git a/runtime/bindings/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py index d6efec455e9e5f..3588b4852a3bdc 100644 --- a/runtime/bindings/python/tests/__init__.py +++ b/runtime/bindings/python/tests/__init__.py @@ -26,7 +26,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_accuracy = xfail_test(reason="Accuracy") xfail_issue_69444 = xfail_test(reason="failed with accuracy issue") skip_issue_69443 = pytest.mark.skip(reason="Segmentation fault due to empty pads_begin, pads_end") -skip_issue_67415 = pytest.mark.skip(reason="RuntimeError: Unsupported data type for when filling blob!") xfail_issue_67415 = xfail_test(reason="RuntimeError: Unsupported data type for when filling blob!") xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " "MaxUnpool") @@ -108,8 +107,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported." " z should be converted to TileIE operation.") -xfail_issue_39659 = xfail_test(reason="RuntimeError: Broadcast operation has a form that is not supported." 
- " y should be converted to Tile operation.") xfail_issue_39662 = xfail_test(reason="RuntimeError: 'ScatterElementsUpdate' layer with name 'y' have " "indices value that points to non-existing output tensor element") diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index 43bfbb3df07841..61712cfd314cd8 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -8,11 +8,10 @@ import numpy as np -from openvino import Core, Blob +from openvino import Core from openvino.exceptions import UserInputError from openvino.impl import Function, Node, PartialShape, Type -from openvino.opset1.ops import result from openvino.utils.types import NumericData, get_shape, get_dtype import tests @@ -81,7 +80,6 @@ def __repr__(self) -> str: params_string = ", ".join([param.name for param in self.parameters]) return "".format(self.function.get_name(), params_string) - def convert_buffers(self, source_buffers, target_dtypes): converted_buffers = [] for i in range(len(source_buffers)): diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index a33c3d599b33c7..2db8d5340a645e 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -109,19 +109,19 @@ def test_set_tensors(device): assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2) request.set_input_tensor(tensor3) - t6 = request.get_tensor(request.input_tensors[0]) + t6 = request.get_tensor(request.inputs[0]) assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2) request.set_input_tensor(0, tensor1) - t7 = request.get_tensor(request.input_tensors[0]) + t7 = request.get_tensor(request.inputs[0]) assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2) request.set_output_tensor(tensor2) - t8 = request.get_tensor(request.output_tensors[0]) + t8 = request.get_tensor(request.outputs[0]) assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2) request.set_output_tensor(0, tensor4) - t9 = request.get_tensor(request.output_tensors[0]) + t9 = request.get_tensor(request.outputs[0]) assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2) diff --git a/runtime/bindings/python/tests/test_ngraph/test_basic.py b/runtime/bindings/python/tests/test_ngraph/test_basic.py index 02fb9f4ca3acdb..e7bbfbe625485c 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_basic.py +++ b/runtime/bindings/python/tests/test_ngraph/test_basic.py @@ -17,8 +17,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import skip_issue_67415 - def test_ngraph_function_api(): shape = [2, 2] @@ -48,7 +46,7 @@ def test_ngraph_function_api(): "dtype", [ np.float32, - pytest.param(np.float64, marks=skip_issue_67415), + np.float64, np.int8, np.int16, np.int32, @@ -173,7 +171,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type) ) def test_convert_to_int(destination_type, expected_type): np.random.seed(133391) - input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32) + input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(expected_type) expected = np.array(input_data, dtype=expected_type) result = run_op_node([input_data], ops.convert, destination_type) assert np.allclose(result, expected) @@ -195,7 +193,7 @@ def 
test_convert_to_int(destination_type, expected_type): ) def test_convert_to_uint(destination_type, expected_type): np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(expected_type) expected = np.array(input_data, dtype=expected_type) result = run_op_node([input_data], ops.convert, destination_type) assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests/test_ngraph/test_data_movement.py b/runtime/bindings/python/tests/test_ngraph/test_data_movement.py index 0baac90c011e0a..624427210a888b 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_data_movement.py +++ b/runtime/bindings/python/tests/test_ngraph/test_data_movement.py @@ -8,8 +8,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_67415 - def test_reverse_sequence(): input_data = np.array( @@ -180,7 +178,6 @@ def test_pad_constant(): assert np.allclose(result, expected) -@xfail_issue_67415 def test_select(): cond = np.array([[False, False], [True, False], [True, True]]) then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops.py b/runtime/bindings/python/tests/test_ngraph/test_ops.py index 4e4ce9005cfce3..4719466c451380 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops.py @@ -10,7 +10,6 @@ from openvino.impl.op import Constant, Parameter from tests.runtime import get_runtime -from tests import xfail_issue_67415 from tests import xfail_issue_54663 @@ -523,7 +522,6 @@ def test_axisset(): assert set(tuple_axisset) == set(set_axisset) -@xfail_issue_67415 def test_select(): element_type = Type.f32 A = Parameter(Type.boolean, Shape([1, 2])) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py index 7a910eae3c5b58..8e965c9e3f01c3 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py @@ -10,8 +10,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_67415 - @pytest.mark.parametrize( "ng_api_helper,numpy_function", @@ -84,7 +82,6 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function): assert np.allclose(result, expected) -@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], @@ -107,7 +104,6 @@ def test_binary_logical_op(ng_api_helper, numpy_function): assert np.allclose(result, expected) -@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper,numpy_function", [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py index e304914180210a..7ce4f98e94535a 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py @@ -9,8 +9,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_67415 - @pytest.mark.parametrize( "ng_api_fn, numpy_fn, range_start, range_end", @@ -79,7 +77,6 @@ def test_unary_op_scalar(ng_api_fn, 
numpy_fn, input_data): assert np.allclose(result, expected) -@xfail_issue_67415 @pytest.mark.parametrize( "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))] ) diff --git a/runtime/bindings/python/tests/test_ngraph/test_reduction.py b/runtime/bindings/python/tests/test_ngraph/test_reduction.py index 12334fbe82fa89..8ac606eaa855ec 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_reduction.py +++ b/runtime/bindings/python/tests/test_ngraph/test_reduction.py @@ -10,8 +10,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_67415 - @pytest.mark.parametrize( "ng_api_helper, numpy_function, reduction_axes", @@ -40,7 +38,6 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes): assert np.allclose(result, expected) -@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper, numpy_function, reduction_axes", [ diff --git a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py index ebef086f174954..c394225230e8bd 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py +++ b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py @@ -6,8 +6,7 @@ import openvino.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import (xfail_issue_47337, - xfail_accuracy) +from tests import (xfail_issue_47337) def test_onehot(): @@ -35,7 +34,6 @@ def test_one_hot(): assert np.allclose(result, excepted) -@xfail_accuracy def test_range(): start = 5 stop = 35 diff --git a/runtime/bindings/python/tests/test_onnx/test_backend.py b/runtime/bindings/python/tests/test_onnx/test_backend.py index 16f432e101d7da..3c2cc707300b4f 100644 --- a/runtime/bindings/python/tests/test_onnx/test_backend.py +++ b/runtime/bindings/python/tests/test_onnx/test_backend.py @@ -7,7 +7,6 @@ from tests import ( BACKEND_NAME, skip_rng_tests, - xfail_issue_67415, xfail_issue_33488, xfail_issue_33538, xfail_issue_33581, @@ -29,7 +28,6 @@ xfail_issue_38734, xfail_issue_38735, xfail_issue_39658, - xfail_issue_39659, xfail_issue_39662, xfail_issue_44854, xfail_issue_44858, @@ -106,51 +104,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None globals().update(backend_test.enable_report().test_cases) tests_expected_to_fail = [ - ( - xfail_issue_67415, - "OnnxBackendNodeModelTest.test_and2d_cpu", - "OnnxBackendNodeModelTest.test_and3d_cpu", - "OnnxBackendNodeModelTest.test_and4d_cpu", - "OnnxBackendNodeModelTest.test_and_bcast3v1d_cpu", - "OnnxBackendNodeModelTest.test_and_bcast3v2d_cpu", - "OnnxBackendNodeModelTest.test_and_bcast4v2d_cpu", - "OnnxBackendNodeModelTest.test_and_bcast4v3d_cpu", - "OnnxBackendNodeModelTest.test_and_bcast4v4d_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_DOUBLE_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_if_cpu", - "OnnxBackendNodeModelTest.test_max_float16_cpu", - "OnnxBackendNodeModelTest.test_min_float16_cpu", - "OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu", - "OnnxBackendNodeModelTest.test_not_2d_cpu", - "OnnxBackendNodeModelTest.test_not_3d_cpu", - "OnnxBackendNodeModelTest.test_not_4d_cpu", - "OnnxBackendNodeModelTest.test_or2d_cpu", - 
"OnnxBackendNodeModelTest.test_or3d_cpu", - "OnnxBackendNodeModelTest.test_or4d_cpu", - "OnnxBackendNodeModelTest.test_or_bcast3v1d_cpu", - "OnnxBackendNodeModelTest.test_or_bcast3v2d_cpu", - "OnnxBackendNodeModelTest.test_or_bcast4v2d_cpu", - "OnnxBackendNodeModelTest.test_or_bcast4v3d_cpu", - "OnnxBackendNodeModelTest.test_or_bcast4v4d_cpu", - "OnnxBackendNodeModelTest.test_where_example_cpu", - "OnnxBackendNodeModelTest.test_where_long_example_cpu", - "OnnxBackendNodeModelTest.test_xor2d_cpu", - "OnnxBackendNodeModelTest.test_xor3d_cpu", - "OnnxBackendNodeModelTest.test_xor4d_cpu", - "OnnxBackendNodeModelTest.test_xor_bcast3v1d_cpu", - "OnnxBackendNodeModelTest.test_xor_bcast3v2d_cpu", - "OnnxBackendNodeModelTest.test_xor_bcast4v2d_cpu", - "OnnxBackendNodeModelTest.test_xor_bcast4v3d_cpu", - "OnnxBackendNodeModelTest.test_xor_bcast4v4d_cpu", - "OnnxBackendNodeModelTest.test_compress_0_cpu", - "OnnxBackendNodeModelTest.test_compress_1_cpu", - "OnnxBackendNodeModelTest.test_compress_default_axis_cpu", - "OnnxBackendNodeModelTest.test_compress_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_nonzero_example_cpu", - ), ( xfail_issue_49207, "OnnxBackendNodeModelTest.test_rnn_seq_length_cpu", @@ -174,10 +127,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_tile_cpu", "OnnxBackendNodeModelTest.test_tile_precomputed_cpu", ), - ( - xfail_issue_39659, - "OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu", - ), ( xfail_issue_39662, "OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu", diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py index 7c59a9462c989d..7f5387d20284f9 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py @@ -47,5 +47,19 @@ def test_simple_graph(): runtime = get_runtime() computation = runtime.computation(ng_model_function) - assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32)) - assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32)) + assert np.array_equal( + computation( + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + )[0], + np.array([6.0], dtype=np.float32), + ) + assert np.array_equal( + computation( + np.array([4], dtype=np.float32), + np.array([5], dtype=np.float32), + np.array([6], dtype=np.float32), + )[0], + np.array([15.0], dtype=np.float32), + ) diff --git a/runtime/bindings/python/tests/test_onnx/test_ops_logical.py b/runtime/bindings/python/tests/test_onnx/test_ops_logical.py index aabd4304277bf7..150d50673586ef 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_logical.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_logical.py @@ -7,15 +7,13 @@ from tests.test_onnx.utils import run_node -from tests import xfail_issue_67415 - @pytest.mark.parametrize( "onnx_op, numpy_func, data_type", [ - pytest.param("And", np.logical_and, np.bool, marks=xfail_issue_67415), - pytest.param("Or", np.logical_or, np.bool, marks=xfail_issue_67415), - pytest.param("Xor", np.logical_xor, np.bool, marks=xfail_issue_67415), + pytest.param("And", np.logical_and, np.bool), + pytest.param("Or", np.logical_or, np.bool), + pytest.param("Xor", np.logical_xor, np.bool), pytest.param("Equal", np.equal, np.int32), pytest.param("Greater", np.greater, np.int32), pytest.param("Less", np.less, np.int32), @@ -37,7 
+35,6 @@ def test_logical(onnx_op, numpy_func, data_type): assert np.array_equal(ng_results, [expected_output]) -@xfail_issue_67415 def test_logical_not(): input_data = np.array([[False, True, True], [False, True, False], [False, False, True]]) expected_output = np.logical_not(input_data) diff --git a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py index 041466663d23a5..201458c629a60b 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py @@ -11,8 +11,6 @@ from tests.runtime import get_runtime from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node -from tests import skip_issue_67415 - @pytest.mark.parametrize( "input_data", @@ -333,7 +331,6 @@ def test_cast_to_bool(val_type, input_data): assert np.allclose(result, expected) -@skip_issue_67415 @pytest.mark.parametrize( "val_type, range_start, range_end, in_dtype", [ @@ -359,7 +356,7 @@ def test_cast_to_float(val_type, range_start, range_end, in_dtype): ) def test_cast_to_int(val_type): np.random.seed(133391) - input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16) + input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16).astype(val_type) expected = np.array(input_data, dtype=val_type) model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) @@ -372,7 +369,7 @@ def test_cast_to_int(val_type): ) def test_cast_to_uint(val_type): np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(val_type) expected = np.array(input_data, dtype=val_type) model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) From 7f457ed9780f8b587a9f58945d56b9366f836e3f Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Fri, 5 Nov 2021 09:47:35 +0000 Subject: [PATCH 18/46] Add FLOAT_LIKE xfails --- .../src/mkldnn_plugin/mkldnn_plugin.cpp | 3 +-- runtime/bindings/python/tests/__init__.py | 1 + .../python/tests/test_onnx/test_backend.py | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp index f2400105262f34..13e6aae19f010a 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp @@ -134,7 +134,6 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr manager.register_pass( std::vector{ ngraph::element::i8, ngraph::element::u8, ngraph::element::i4, ngraph::element::u4 }); } - // BRUH auto get_convert_precisions = []() { precisions_array array = { {ngraph::element::i64, ngraph::element::i32}, @@ -440,7 +439,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std // verification of supported input InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); for (const auto &ii : _networkInputs) { - auto input_precision = ii.second->getPrecision(); // BRUH + auto input_precision = ii.second->getPrecision(); if (input_precision != InferenceEngine::Precision::FP64 && input_precision != InferenceEngine::Precision::FP32 && input_precision != InferenceEngine::Precision::I32 && diff --git a/runtime/bindings/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py index 3588b4852a3bdc..6569a37dfaf979 100644 --- a/runtime/bindings/python/tests/__init__.py +++ 
b/runtime/bindings/python/tests/__init__.py @@ -24,6 +24,7 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): skip_segfault = pytest.mark.skip(reason="Segmentation fault error") xfail_accuracy = xfail_test(reason="Accuracy") +xfail_issue_FLOAT_LIKE = xfail_test(reason="Use of bfloat16 or float16") xfail_issue_69444 = xfail_test(reason="failed with accuracy issue") skip_issue_69443 = pytest.mark.skip(reason="Segmentation fault due to empty pads_begin, pads_end") xfail_issue_67415 = xfail_test(reason="RuntimeError: Unsupported data type for when filling blob!") diff --git a/runtime/bindings/python/tests/test_onnx/test_backend.py b/runtime/bindings/python/tests/test_onnx/test_backend.py index 3c2cc707300b4f..4efdf120d4cd3e 100644 --- a/runtime/bindings/python/tests/test_onnx/test_backend.py +++ b/runtime/bindings/python/tests/test_onnx/test_backend.py @@ -6,6 +6,7 @@ import onnx.backend.test from tests import ( BACKEND_NAME, + xfail_issue_FLOAT_LIKE, skip_rng_tests, xfail_issue_33488, xfail_issue_33538, @@ -104,6 +105,20 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None globals().update(backend_test.enable_report().test_cases) tests_expected_to_fail = [ + ( + xfail_issue_FLOAT_LIKE, + "OnnxBackendNodeModelTest.test_cast_BFLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT_to_BFLOAT16_cpu", + "OnnxBackendNodeModelTest.test_castlike_BFLOAT16_to_FLOAT_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_DOUBLE_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_FLOAT_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_BFLOAT16_expanded_cpu", + "OnnxBackendNodeModelTest.test_max_float16_cpu", + "OnnxBackendNodeModelTest.test_min_float16_cpu", + "OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu", + ), ( xfail_issue_49207, "OnnxBackendNodeModelTest.test_rnn_seq_length_cpu", From 9cebdc728922f1bf35067f5c37ec5baaa7f19934 Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Mon, 8 Nov 2021 10:59:03 +0300 Subject: [PATCH 19/46] [Python API] bind ProfilingInfo (#55) * bind ProfilingInfo * Add tests * Fix code style * Add property --- .../bindings/python/src/openvino/__init__.py | 1 + .../src/pyopenvino/core/infer_request.cpp | 9 ++++++ .../src/pyopenvino/core/profiling_info.cpp | 28 +++++++++++++++++++ .../src/pyopenvino/core/profiling_info.hpp | 11 ++++++++ .../python/src/pyopenvino/pyopenvino.cpp | 2 ++ .../test_infer_request.py | 17 +++++------ 6 files changed, 60 insertions(+), 8 deletions(-) create mode 100644 runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index 5634960d064d8c..ceab3f6b48f6d2 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -45,6 +45,7 @@ from openvino.pyopenvino import ColorFormat from openvino.pyopenvino import PreProcessChannel from openvino.pyopenvino import Tensor +from openvino.pyopenvino import ProfilingInfo from openvino import opset1 from openvino import opset2 diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index f3885bcdbfa633..ad9374b5dd1f36 100644 --- 
a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp
+++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp
@@ -254,6 +254,10 @@ void regclass_InferRequest(py::module m) {
         },
         py::arg("tensor"));
 
+    cls.def("get_profiling_info", [](InferRequestWrapper& self) {
+        return self._request.get_profiling_info();
+    });
+
     cls.def_property_readonly("inputs", [](InferRequestWrapper& self) {
         return self._inputs;
     });
@@ -281,4 +285,9 @@ void regclass_InferRequest(py::module m) {
     cls.def_property_readonly("latency", [](InferRequestWrapper& self) {
         return self.get_latency();
     });
+
+    cls.def_property_readonly("profiling_info", [](InferRequestWrapper& self) {
+        return self._request.get_profiling_info();
+    });
+
 }
diff --git a/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp b/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp
new file mode 100644
index 00000000000000..9619e070386017
--- /dev/null
+++ b/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "pyopenvino/core/profiling_info.hpp"
+
+#include <pybind11/chrono.h>
+
+#include "openvino/runtime/profiling_info.hpp"
+
+namespace py = pybind11;
+
+void regclass_ProfilingInfo(py::module m) {
+    py::class_<ov::runtime::ProfilingInfo, std::shared_ptr<ov::runtime::ProfilingInfo>> cls(m, "ProfilingInfo");
+    cls.def(py::init<>())
+        .def_readwrite("status", &ov::runtime::ProfilingInfo::status)
+        .def_readwrite("real_time", &ov::runtime::ProfilingInfo::real_time)
+        .def_readwrite("cpu_time", &ov::runtime::ProfilingInfo::cpu_time)
+        .def_readwrite("node_name", &ov::runtime::ProfilingInfo::node_name)
+        .def_readwrite("exec_type", &ov::runtime::ProfilingInfo::exec_type)
+        .def_readwrite("node_type", &ov::runtime::ProfilingInfo::node_type);
+
+    py::enum_<ov::runtime::ProfilingInfo::Status>(cls, "Status")
+        .value("NOT_RUN", ov::runtime::ProfilingInfo::Status::NOT_RUN)
+        .value("OPTIMIZED_OUT", ov::runtime::ProfilingInfo::Status::OPTIMIZED_OUT)
+        .value("EXECUTED", ov::runtime::ProfilingInfo::Status::EXECUTED)
+        .export_values();
+}
diff --git a/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp b/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp
new file mode 100644
index 00000000000000..023935f620e913
--- /dev/null
+++ b/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+void regclass_ProfilingInfo(py::module m);
diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
index 5f9e66c064d89f..37cdab7e042a00 100644
--- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
+++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
@@ -33,6 +33,7 @@
 #include "pyopenvino/core/ie_preprocess_info.hpp"
 #include "pyopenvino/core/infer_request.hpp"
 #include "pyopenvino/core/offline_transformations.hpp"
+#include "pyopenvino/core/profiling_info.hpp"
 #include "pyopenvino/core/tensor.hpp"
 #include "pyopenvino/core/tensor_description.hpp"
 #include "pyopenvino/core/version.hpp"
@@ -149,6 +150,7 @@ PYBIND11_MODULE(pyopenvino, m) {
     regclass_Parameter(m);
     regclass_InputInfo(m);
     // regclass_InferQueue(m);
+    regclass_ProfilingInfo(m);
     regclass_PreProcessInfo(m);
 
     regmodule_offline_transformations(m);
diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py
index 
2db8d5340a645e..0d058b518fb451 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -4,9 +4,10 @@ import numpy as np import os import pytest +import datetime from ..conftest import image_path, model_path -from openvino import Core, Tensor +from openvino import Core, Tensor, ProfilingInfo is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) @@ -25,7 +26,6 @@ def read_image(): return image -@pytest.mark.skip(reason="ProfilingInfo has to be bound") def test_get_profiling_info(device): core = Core() func = core.read_model(test_net_xml, test_net_bin) @@ -34,12 +34,13 @@ def test_get_profiling_info(device): img = read_image() request = exec_net.create_infer_request() request.infer({0: img}) - pc = request.get_profiling_info() - - assert pc["29"]["status"] == "EXECUTED" - assert pc["29"]["layer_type"] == "FullyConnected" - del exec_net - del core + prof_info = request.get_profiling_info() + soft_max_node = next(node for node in prof_info if node.node_name == "fc_out") + assert soft_max_node.node_type == "Softmax" + assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT + assert isinstance(soft_max_node.real_time, datetime.timedelta) + assert isinstance(soft_max_node.cpu_time, datetime.timedelta) + assert isinstance(soft_max_node.exec_type, str) def test_tensor_setter(device): From f2d8e88976f6ca33a9d5a711568f74b101a82f3a Mon Sep 17 00:00:00 2001 From: akuporos Date: Mon, 8 Nov 2021 11:22:28 +0300 Subject: [PATCH 20/46] fix codestyle --- runtime/bindings/python/src/pyopenvino/core/infer_request.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index ad9374b5dd1f36..358ffc0c153569 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -289,5 +289,4 @@ void regclass_InferRequest(py::module m) { cls.def_property_readonly("profiling_info", [](InferRequestWrapper& self) { return self._request.get_profiling_info(); }); - } From e62512f5fe2c16b59c566b415df968b658f675c5 Mon Sep 17 00:00:00 2001 From: Bartek Szmelczynski Date: Mon, 8 Nov 2021 11:05:23 +0100 Subject: [PATCH 21/46] Infer new request method (#56) * fix conflicts, add infer_new_request function * remove redundant functions, fix style * revert the unwanted changes * revert removal of the Blob * revert removal of isTblob --- .../bindings/python/src/openvino/__init__.py | 3 +- .../bindings/python/src/openvino/ie_api.py | 9 +++ .../python/src/pyopenvino/core/common.hpp | 1 - .../pyopenvino/core/executable_network.cpp | 39 ++++++++++-- .../test_executable_network.py | 60 ++++++++++++++++++- 5 files changed, 104 insertions(+), 8 deletions(-) diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index ceab3f6b48f6d2..d6d94dac942479 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -18,6 +18,7 @@ from openvino.ie_api import start_async from openvino.ie_api import blob_from_file from openvino.ie_api import tensor_from_file +from openvino.ie_api import infer_new_request from openvino.impl import Dimension from openvino.impl import Function @@ -79,7 +80,7 @@ # this class will be removed Blob = BlobWrapper # Patching ExecutableNetwork 
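# A minimal usage sketch for the infer_new_request helper patched in below,
# assuming a Core instance, a readable IR model and a NumPy image `img`
# (all names are illustrative, not part of this diff):
#
#     core = Core()
#     func = core.read_model(model="model.xml", weights="model.bin")
#     exec_net = core.compile_model(func, "CPU")
#     results = exec_net.infer_new_request({"data": img})  # list of np.ndarray copies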
-ExecutableNetwork.infer = infer +ExecutableNetwork.infer_new_request = infer_new_request # Patching InferRequest InferRequest.infer = infer InferRequest.start_async = start_async diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index 3d15dc52573f3b..af4d0e97f0625b 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -3,6 +3,7 @@ import numpy as np import copy +from typing import List from openvino.pyopenvino import TBlobFloat32 from openvino.pyopenvino import TBlobFloat64 @@ -16,6 +17,7 @@ from openvino.pyopenvino import TBlobUint8 from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import InferRequest +from openvino.pyopenvino import ExecutableNetwork from openvino.pyopenvino import Tensor @@ -47,6 +49,13 @@ def infer(request: InferRequest, inputs: dict = None) -> np.ndarray: # dimensions. This results in errors when running ops like variadic split. return [copy.deepcopy(tensor.data) for tensor in res] + +def infer_new_request(exec_net: ExecutableNetwork, inputs: dict = None) -> List[np.ndarray]: + res = exec_net._infer_new_request(inputs=normalize_inputs(inputs if inputs is not None else {})) + # Required to return list since np.ndarray forces all of tensors data to match in + # dimensions. This results in errors when running ops like variadic split. + return [copy.deepcopy(tensor.data) for tensor in res] + # flake8: noqa: D102 def start_async(request: InferRequest, inputs: dict = None) -> None: # type: ignore request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {})) diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp index fb7a47e6be15af..d4be9bd2a77995 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -61,5 +61,4 @@ namespace Common void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual); - }; // namespace Common diff --git a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp index a5b04aab2c18cb..389a865aea1b0a 100644 --- a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp @@ -1,5 +1,6 @@ // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +// #include "openvino/runtime/executable_network.hpp" @@ -7,9 +8,11 @@ #include "common.hpp" #include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_input_info.hpp" #include "pyopenvino/core/infer_request.hpp" +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); + namespace py = pybind11; void regclass_ExecutableNetwork(py::module m) { @@ -21,10 +24,36 @@ void regclass_ExecutableNetwork(py::module m) { return InferRequestWrapper(self.create_infer_request(), self.inputs(), self.outputs()); }); - // cls.def("infer_new_request", [](ov::runtime::ExecutableNetwork& self, const py::dict& inputs) { - // TODO: implment after https://github.com/openvinotoolkit/openvino/pull/7962 - // will be merged as a seperate ticket - // }); + cls.def( + "_infer_new_request", + [](ov::runtime::ExecutableNetwork& self, const py::dict& inputs) { + auto request = 
self.create_infer_request();
+            if (!inputs.empty()) {
+                const auto key = inputs.begin()->first;
+                if (py::isinstance<py::str>(key)) {
+                    auto inputs_map = Common::cast_to_tensor_name_map(inputs);
+                    for (auto&& input : inputs_map) {
+                        request.set_tensor(input.first, input.second);
+                    }
+                } else if (py::isinstance<py::int_>(key)) {
+                    auto inputs_map = Common::cast_to_tensor_index_map(inputs);
+                    for (auto&& input : inputs_map) {
+                        request.set_input_tensor(input.first, input.second);
+                    }
+                } else {
+                    throw py::type_error("Incompatible key type! Supported types are string and int.");
+                }
+            }
+
+            request.infer();
+
+            Containers::InferResults results;
+            for (const auto out : self.outputs()) {
+                results.push_back(request.get_tensor(out));
+            }
+            return results;
+        },
+        py::arg("inputs"));
 
     cls.def("export_model", &ov::runtime::ExecutableNetwork::export_model, py::arg("network_model"));
diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py
index ec1eaf2f8112b9..99a5c8e96f12d9 100644
--- a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py
+++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py
@@ -2,13 +2,14 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+from numpy.core.fromnumeric import argmax
 import pytest
 import numpy as np
 
 from ..conftest import model_path, image_path
 from openvino.impl import Function, ConstOutput, Shape
-from openvino import Core
+from openvino import Core, Tensor
 
 is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
 test_net_xml, test_net_bin = model_path(is_myriad)
@@ -236,3 +237,60 @@ def test_inputs_docs(device):
     input_0 = inputs[0]
     expected_string = "openvino.impl.ConstOutput wraps ov::Output<const ov::Node>"
     assert input_0.__doc__ == expected_string
+
+
+def test_infer_new_request_numpy(device):
+    ie = Core()
+    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
+    img = read_image()
+    exec_net = ie.compile_model(func, device)
+    res = exec_net.infer_new_request({'data': img})
+    assert np.argmax(res) == 2
+
+
+def test_infer_new_request_tensor_numpy_copy(device):
+    ie = Core()
+    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
+    img = read_image()
+    tensor = Tensor(img)
+    exec_net = ie.compile_model(func, device)
+    res_tensor = exec_net.infer_new_request({'data': tensor})
+    res_img = exec_net.infer_new_request({'data': tensor})
+    assert np.argmax(res_tensor) == 2
+    assert np.argmax(res_tensor) == np.argmax(res_img)
+
+
+def test_infer_tensor_numpy_shared_memory(device):
+    ie = Core()
+    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
+    img = read_image()
+    img = np.ascontiguousarray(img)
+    tensor = Tensor(img, shared_memory=True)
+    exec_net = ie.compile_model(func, device)
+    res_tensor = exec_net.infer_new_request({'data': tensor})
+    res_img = exec_net.infer_new_request({'data': tensor})
+    assert np.argmax(res_tensor) == 2
+    assert np.argmax(res_tensor) == np.argmax(res_img)
+
+
+def test_infer_new_request_wrong_port_name(device):
+    ie = Core()
+    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
+    img = read_image()
+    tensor = Tensor(img)
+    exec_net = ie.compile_model(func, device)
+    with pytest.raises(RuntimeError) as e:
+        exec_net.infer_new_request({'_data_': tensor})
+    assert "Port for tensor name _data_ was not found." 
in str(e.value) + + +def test_infer_tensor_wrong_input_data(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + img = np.ascontiguousarray(img) + tensor = Tensor(img, shared_memory=True) + exec_net = ie.compile_model(func, device) + with pytest.raises(TypeError) as e: + exec_net.infer_new_request({4.5: tensor}) + assert "Incompatible key type!" in str(e.value) From 3621e39f293d2daf3cf65a8a1bdb901959dce11a Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Mon, 8 Nov 2021 13:20:57 +0300 Subject: [PATCH 22/46] add add_extension from path --- .../python/src/pyopenvino/core/core.cpp | 7 ++ runtime/bindings/python/tests/conftest.py | 1 + .../tests/test_inference_engine/test_core.py | 72 +++++++++++++++++++ 3 files changed, 80 insertions(+) diff --git a/runtime/bindings/python/src/pyopenvino/core/core.cpp b/runtime/bindings/python/src/pyopenvino/core/core.cpp index 63d23ca9fc87b6..7538ffd7f79f23 100644 --- a/runtime/bindings/python/src/pyopenvino/core/core.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/core.cpp @@ -101,5 +101,12 @@ void regclass_Core(py::module m) { py::arg("device_name"), py::arg("config") = py::dict()); + cls.def( + "add_extension", + [](ov::runtime::Core& self, const std::string& library_path) { + return self.add_extension(library_path); + }, + py::arg("library_path")); + cls.def_property_readonly("available_devices", &ov::runtime::Core::get_available_devices); } diff --git a/runtime/bindings/python/tests/conftest.py b/runtime/bindings/python/tests/conftest.py index 865ae43552e0ec..484c59f2a5f5d1 100644 --- a/runtime/bindings/python/tests/conftest.py +++ b/runtime/bindings/python/tests/conftest.py @@ -78,6 +78,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "skip_on_hetero: Skip test on HETERO") config.addinivalue_line("markers", "skip_on_template: Skip test on TEMPLATE") config.addinivalue_line("markers", "onnx_coverage: Collect ONNX operator coverage") + config.addinivalue_line("markers", "template_extension") def pytest_collection_modifyitems(config, items): diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index b098e5056aff09..f715de65ef6b93 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -262,3 +262,75 @@ def test_unregister_plugin(device): with pytest.raises(RuntimeError) as e: ie.load_network(func, device) assert f"Device with '{device}' name is not registered in the InferenceEngine" in str(e.value) + + +@pytest.mark.xfail("68212") +@pytest.mark.template_extension +def test_add_extension(device): + model = bytes(b""" + + + + + + 2 + 2 + 2 + 1 + + + + + + + + 2 + 2 + 2 + 1 + + + + + 2 + 2 + 2 + 1 + + + + + + + 2 + 2 + 2 + 1 + + + + + + + + +""") + + core = Core() + if platform == "win32": + core.add_extension(extension_path="template_extension.dll") + else: + core.add_extension(extension_path="libtemplate_extension.so") + func = core.read_model(model=model, init_from_buffer=True) + assert isinstance(func, Function) + + # input_blob = next(iter(network.input_info)) + # n, c, h, w = network.input_info[input_blob].input_data.shape + + # input_values = np.ndarray(buffer=np.array([1, 2, 3, 4, 5, 6, 7, 8]), shape = (n, c, h, w), dtype=int) + # expected = np.ndarray(buffer=np.array([12, 13, 14, 15, 16, 17, 18, 19]), shape = (n, c, h, w), dtype=int) + # + # exec_network = 
core.compile_model(func, device) + # computed = exec_network.infer_new_request(inputs={input_blob : input_values}) + # output_blob = next(iter(network.outputs)) + # assert np.allclose(expected, computed[output_blob], atol=1e-2, rtol=1e-2) From efd844dbcd28d2dac5fb002c2e3eb57915000a77 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Mon, 8 Nov 2021 14:07:00 +0300 Subject: [PATCH 23/46] codestyle --- .../python/tests/test_inference_engine/test_core.py | 3 ++- .../test_executable_network.py | 13 ++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index f715de65ef6b93..81621de432abf0 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -328,7 +328,8 @@ def test_add_extension(device): # n, c, h, w = network.input_info[input_blob].input_data.shape # input_values = np.ndarray(buffer=np.array([1, 2, 3, 4, 5, 6, 7, 8]), shape = (n, c, h, w), dtype=int) - # expected = np.ndarray(buffer=np.array([12, 13, 14, 15, 16, 17, 18, 19]), shape = (n, c, h, w), dtype=int) + # expected = np.ndarray(buffer=np.array([12, 13, 14, 15, 16, 17, 18, 19]), + # shape = (n, c, h, w), dtype=int) # # exec_network = core.compile_model(func, device) # computed = exec_network.infer_new_request(inputs={input_blob : input_values}) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py index 99a5c8e96f12d9..2a2e80b6c8cd52 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 import os -from numpy.core.fromnumeric import argmax import pytest import numpy as np @@ -244,7 +243,7 @@ def test_infer_new_request_numpy(device): func = ie.read_model(model=test_net_xml, weights=test_net_bin) img = read_image() exec_net = ie.compile_model(func, device) - res = exec_net.infer_new_request({'data': img}) + res = exec_net.infer_new_request({"data": img}) assert np.argmax(res) == 2 @@ -254,8 +253,8 @@ def test_infer_new_request_tensor_numpy_copy(device): img = read_image() tensor = Tensor(img) exec_net = ie.compile_model(func, device) - res_tensor = exec_net.infer_new_request({'data': tensor}) - res_img = exec_net.infer_new_request({'data': tensor}) + res_tensor = exec_net.infer_new_request({"data": tensor}) + res_img = exec_net.infer_new_request({"data": tensor}) assert np.argmax(res_tensor) == 2 assert np.argmax(res_tensor) == np.argmax(res_img) @@ -267,8 +266,8 @@ def test_infer_tensor_numpy_shared_memory(device): img = np.ascontiguousarray(img) tensor = Tensor(img, shared_memory=True) exec_net = ie.compile_model(func, device) - res_tensor = exec_net.infer_new_request({'data': tensor}) - res_img = exec_net.infer_new_request({'data': tensor}) + res_tensor = exec_net.infer_new_request({"data": tensor}) + res_img = exec_net.infer_new_request({"data": tensor}) assert np.argmax(res_tensor) == 2 assert np.argmax(res_tensor) == np.argmax(res_img) @@ -280,7 +279,7 @@ def test_infer_new_request_wrong_port_name(device): tensor = Tensor(img) exec_net = ie.compile_model(func, device) with pytest.raises(RuntimeError) as e: - exec_net.infer_new_request({'_data_': tensor}) + 
exec_net.infer_new_request({"_data_": tensor}) assert "Port for tensor name _data_ was not found." in str(e.value) From 1c208ba5ce180146a6332f4d6b224297f8aecd80 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Tue, 9 Nov 2021 15:58:47 +0300 Subject: [PATCH 24/46] fix win build --- .../bindings/python/src/openvino/__init__.py | 1 - .../python/src/pyopenvino/pyopenvino.cpp | 22 ------------------- 2 files changed, 23 deletions(-) diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index d6d94dac942479..56e7605c38699a 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -36,7 +36,6 @@ from openvino.pyopenvino import DataPtr from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import get_version -from openvino.pyopenvino import StatusCode #from openvino.pyopenvino import InferQueue from openvino.pyopenvino import InferRequest # TODO: move to ie_api? from openvino.pyopenvino import Blob diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index 37cdab7e042a00..d0088018454005 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -1,10 +1,8 @@ // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include #include -#include #include #include #include @@ -65,26 +63,6 @@ std::string get_version() { PYBIND11_MODULE(pyopenvino, m) { m.doc() = "Package openvino.pyopenvino which wraps openvino C++ APIs"; m.def("get_version", &get_version); - py::enum_(m, "StatusCode") - .value("OK", InferenceEngine::StatusCode::OK) - .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR) - .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED) - .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED) - .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH) - .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND) - .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS) - .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED) - .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY) - .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY) - .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED) - .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED) - .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ) - .export_values(); - - py::enum_(m, "WaitMode") - .value("RESULT_READY", InferenceEngine::IInferRequest::WaitMode::RESULT_READY) - .value("STATUS_ONLY", InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY) - .export_values(); regclass_graph_PyRTMap(m); regmodule_graph_types(m); From b5d8303c51c3935b226e6f7bd49b6e0cc92349c6 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Tue, 9 Nov 2021 19:31:34 +0300 Subject: [PATCH 25/46] add inputs-outputs to function --- .../python/src/pyopenvino/graph/function.cpp | 46 +++++++++++++++++++ .../python/tests/test_ngraph/test_basic.py | 6 +++ 2 files changed, 52 insertions(+) diff --git a/runtime/bindings/python/src/pyopenvino/graph/function.cpp b/runtime/bindings/python/src/pyopenvino/graph/function.cpp index 37afccb684e159..27e3d18b0ec079 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/function.cpp +++ b/runtime/bindings/python/src/pyopenvino/graph/function.cpp @@ 
-316,6 +316,42 @@ void regclass_graph_Function(py::module m) {
         ----------
         is_dynamic : bool
     )");
+    function.def("input", (ov::Output<ov::Node>(ov::Function::*)()) & ov::Function::input);
+
+    function.def("input", (ov::Output<ov::Node>(ov::Function::*)(size_t)) & ov::Function::input, py::arg("i"));
+
+    function.def("input",
+                 (ov::Output<ov::Node>(ov::Function::*)(const std::string&)) & ov::Function::input,
+                 py::arg("tensor_name"));
+
+    function.def("input", (ov::Output<const ov::Node>(ov::Function::*)() const) & ov::Function::input);
+
+    function.def("input",
+                 (ov::Output<const ov::Node>(ov::Function::*)(size_t) const) & ov::Function::input,
+                 py::arg("i"));
+
+    function.def("input",
+                 (ov::Output<const ov::Node>(ov::Function::*)(const std::string&) const) & ov::Function::input,
+                 py::arg("tensor_name"));
+
+    function.def("output", (ov::Output<ov::Node>(ov::Function::*)()) & ov::Function::output);
+
+    function.def("output", (ov::Output<ov::Node>(ov::Function::*)(size_t)) & ov::Function::output, py::arg("i"));
+
+    function.def("output",
+                 (ov::Output<ov::Node>(ov::Function::*)(const std::string&)) & ov::Function::output,
+                 py::arg("tensor_name"));
+
+    function.def("output", (ov::Output<const ov::Node>(ov::Function::*)() const) & ov::Function::output);
+
+    function.def("output",
+                 (ov::Output<const ov::Node>(ov::Function::*)(size_t) const) & ov::Function::output,
+                 py::arg("i"));
+
+    function.def("output",
+                 (ov::Output<const ov::Node>(ov::Function::*)(const std::string&) const) & ov::Function::output,
+                 py::arg("tensor_name"));
+
     function.def("__repr__", [](const ov::Function& self) {
         std::string class_name = py::cast(self).get_type().attr("__name__").cast<std::string>();
         std::stringstream shapes_ss;
@@ -361,6 +397,16 @@ void regclass_graph_Function(py::module m) {
         return pybind_capsule;
     });
 
+    function.def_property_readonly("inputs",
+                                   (std::vector<ov::Output<ov::Node>>(ov::Function::*)()) & ov::Function::inputs);
+    function.def_property_readonly(
+        "inputs",
+        (std::vector<ov::Output<const ov::Node>>(ov::Function::*)() const) & ov::Function::inputs);
+    function.def_property_readonly("outputs",
+                                   (std::vector<ov::Output<ov::Node>>(ov::Function::*)()) & ov::Function::outputs);
+    function.def_property_readonly(
+        "outputs",
+        (std::vector<ov::Output<const ov::Node>>(ov::Function::*)() const) & ov::Function::outputs);
     function.def_property_readonly("name", &ov::Function::get_name);
     function.def_property("friendly_name", &ov::Function::get_friendly_name, &ov::Function::set_friendly_name);
 }
diff --git a/runtime/bindings/python/tests/test_ngraph/test_basic.py b/runtime/bindings/python/tests/test_ngraph/test_basic.py
index e7bbfbe625485c..2d7deb1f47b9d1 100644
--- a/runtime/bindings/python/tests/test_ngraph/test_basic.py
+++ b/runtime/bindings/python/tests/test_ngraph/test_basic.py
@@ -33,6 +33,12 @@ def test_ngraph_function_api():
     assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"]
     assert len(function.get_ops()) == 6
     assert function.get_output_size() == 1
+    assert ["A", "B", "C"] == [input.get_node().friendly_name for input in function.inputs]
+    assert ["Result"] == [output.get_node().get_type_name() for output in function.outputs]
+    assert function.input(0).get_node().friendly_name == "A"
+    assert function.output(0).get_node().get_type_name() == "Result"
+    assert function.input(tensor_name="A").get_node().friendly_name == "A"
+    assert function.output().get_node().get_type_name() == "Result"
     assert function.get_output_op(0).get_type_name() == "Result"
     assert function.get_output_element_type(0) == parameter_a.get_element_type()
     assert list(function.get_output_shape(0)) == [2, 2]
From a9a0e7f8d1135806443f0aa00b41ba664e7594cf Mon Sep 17 00:00:00 2001
From: Alexey Lebedev 
Date: Wed, 10 Nov 2021 15:54:39 +0300
Subject: 
[PATCH 26/46] update infer queue --- .../bindings/python/src/openvino/__init__.py | 6 +- .../bindings/python/src/openvino/ie_api.py | 7 +- .../src/pyopenvino/core/async_infer_queue.cpp | 210 ++++++++++++++++ ..._infer_queue.hpp => async_infer_queue.hpp} | 2 +- .../python/src/pyopenvino/core/common.cpp | 6 +- .../python/src/pyopenvino/core/common.hpp | 3 +- .../src/pyopenvino/core/ie_infer_queue.cpp | 228 ------------------ .../python/src/pyopenvino/pyopenvino.cpp | 4 +- .../test_infer_request.py | 21 +- 9 files changed, 246 insertions(+), 241 deletions(-) create mode 100644 runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp rename runtime/bindings/python/src/pyopenvino/core/{ie_infer_queue.hpp => async_infer_queue.hpp} (77%) delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index 56e7605c38699a..aea37b7c58cff4 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -36,7 +36,7 @@ from openvino.pyopenvino import DataPtr from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import get_version -#from openvino.pyopenvino import InferQueue +from openvino.pyopenvino import AsyncInferQueue from openvino.pyopenvino import InferRequest # TODO: move to ie_api? from openvino.pyopenvino import Blob from openvino.pyopenvino import PreProcessInfo @@ -83,5 +83,5 @@ # Patching InferRequest InferRequest.infer = infer InferRequest.start_async = start_async -# Patching InferQueue -#InferQueue.async_infer = async_infer +# Patching AsyncInferQueue +AsyncInferQueue.start_async = start_async diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index af4d0e97f0625b..030260ea64c4f5 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -57,8 +57,11 @@ def infer_new_request(exec_net: ExecutableNetwork, inputs: dict = None) -> List[ return [copy.deepcopy(tensor.data) for tensor in res] # flake8: noqa: D102 -def start_async(request: InferRequest, inputs: dict = None) -> None: # type: ignore - request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {})) +def start_async(request: InferRequest, inputs: dict = None, userdata = None) -> None: # type: ignore + if userdata: + request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {}), userdata=userdata) + else: + request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {})) # flake8: noqa: C901 # Dispatch Blob types on Python side. 
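The queue bound in the new file below can be driven from Python roughly as follows; a minimal sketch assuming an already compiled ExecutableNetwork `exec_net` and a NumPy input `img` (the names, job count, and input key are illustrative, not part of this diff):

    from openvino import AsyncInferQueue

    def callback(request, userdata):
        # Runs once a request finishes; the binding re-acquires the GIL first.
        print(userdata, request.get_tensor(request.outputs[0]).data)

    infer_queue = AsyncInferQueue(exec_net, jobs=4)
    infer_queue.set_infer_callback(callback)
    for i in range(16):
        # start_async blocks until one of the four requests becomes idle.
        infer_queue.start_async({0: img}, userdata=f"job_{i}")
    infer_queue.wait_all()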
diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
new file mode 100644
index 00000000000000..7bb5fd7acb6370
--- /dev/null
+++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
@@ -0,0 +1,210 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "pyopenvino/core/async_infer_queue.hpp"
+
+#include <ie_common.h>
+#include <pybind11/functional.h>
+#include <pybind11/stl.h>
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "pyopenvino/core/common.hpp"
+#include "pyopenvino/core/infer_request.hpp"
+
+#define INVALID_ID -1
+
+namespace py = pybind11;
+
+class AsyncInferQueue {
+public:
+    AsyncInferQueue(std::vector<InferRequestWrapper> requests,
+                    std::queue<size_t> idle_handles,
+                    std::vector<py::object> user_ids)
+        : _requests(requests),
+          _idle_handles(idle_handles),
+          _user_ids(user_ids) {
+        this->setDefaultCallbacks();
+        _last_id = -1;
+    }
+
+    ~AsyncInferQueue() {
+        _requests.clear();
+    }
+
+    bool _is_ready() {
+        py::gil_scoped_release release;
+        std::unique_lock<std::mutex> lock(_mutex);
+        _cv.wait(lock, [this] {
+            return !(_idle_handles.empty());
+        });
+
+        return !(_idle_handles.empty());
+    }
+
+    size_t getIdleRequestId() {
+        // Wait for any of _idle_handles
+        py::gil_scoped_release release;
+        std::unique_lock<std::mutex> lock(_mutex);
+        _cv.wait(lock, [this] {
+            return !(_idle_handles.empty());
+        });
+
+        size_t idle_request_id = _idle_handles.front();
+        _idle_handles.pop();
+
+        return idle_request_id;
+    }
+
+    void waitAll() {
+        // Wait for all requests to return with callback thus updating
+        // _idle_handles so it matches the size of requests
+        py::gil_scoped_release release;
+        std::unique_lock<std::mutex> lock(_mutex);
+        _cv.wait(lock, [this] {
+            return _idle_handles.size() == _requests.size();
+        });
+    }
+
+    void setDefaultCallbacks() {
+        for (size_t handle = 0; handle < _requests.size(); handle++) {
+            _requests[handle]._request.set_callback([this, handle /* ... */](std::exception_ptr exception_ptr) {
+                _requests[handle]._end_time = Time::now();
+                // Add idle handle to queue
+                _idle_handles.push(handle);
+                // Notify locks in getIdleRequestId() or waitAll() functions
+                _cv.notify_one();
+            });
+        }
+    }
+
+    void setCustomCallbacks(py::function f_callback) {
+        for (size_t handle = 0; handle < _requests.size(); handle++) {
+            _requests[handle]._request.set_callback([this, f_callback, handle](std::exception_ptr exception_ptr) {
+                _requests[handle]._end_time = Time::now();
+                try {
+                    if (exception_ptr) {
+                        std::rethrow_exception(exception_ptr);
+                    }
+                } catch (const std::exception& e) {
+                    IE_THROW() << "Caught exception: " << e.what();
+                }
+                // Acquire GIL, execute Python function
+                py::gil_scoped_acquire acquire;
+                f_callback(_requests[handle], _user_ids[handle]);
+                // Add idle handle to queue
+                _idle_handles.push(handle);
+                // Notify locks in getIdleRequestId() or waitAll() functions
+                _cv.notify_one();
+            });
+        }
+    }
+
+    std::vector<InferRequestWrapper> _requests;
+    std::queue<size_t> _idle_handles;
+    std::vector<py::object> _user_ids;  // user ID can be any Python object
+    size_t _last_id;
+    std::mutex _mutex;
+    std::condition_variable _cv;
+};
+
+void regclass_AsyncInferQueue(py::module m) {
+    py::class_<AsyncInferQueue, std::shared_ptr<AsyncInferQueue>> cls(m, "AsyncInferQueue");
+
+    cls.def(py::init([](ov::runtime::ExecutableNetwork& net, size_t jobs) {
+                if (jobs == 0) {
+                    jobs = (size_t)Common::get_optimal_number_of_requests(net);
+                }
+
+                std::vector<InferRequestWrapper> requests;
+                std::queue<size_t> idle_handles;
+                std::vector<py::object> user_ids(jobs);
+
+                for (size_t handle = 0; handle < jobs; handle++) {
+                    auto request = InferRequestWrapper(net.create_infer_request());
+                    // Get Inputs and Outputs info from executable network
+                    request._inputs = net.inputs();
+                    request._outputs = net.outputs();
+
+                    requests.push_back(request);
+                    idle_handles.push(handle);
+                }
+
+                return new AsyncInferQueue(requests, idle_handles, user_ids);
+            }),
+            py::arg("network"),
+            py::arg("jobs") = 0);
+
+    cls.def(
+        "_start_async",
+        [](AsyncInferQueue& self, const py::dict inputs, py::object userdata) {
+            // getIdleRequestId function has an intention to block InferQueue
+            // until there is at least one idle (free to use) InferRequest
+            auto handle = self.getIdleRequestId();
+            // Set new inputs label/id from user
+            self._user_ids[handle] = userdata;
+            // Update inputs if there are any
+            if (!inputs.empty()) {
+                if (py::isinstance<py::str>(inputs.begin()->first)) {
+                    auto inputs_map = Common::cast_to_tensor_name_map(inputs);
+                    for (auto&& input : inputs_map) {
+                        self._requests[handle]._request.set_tensor(input.first, input.second);
+                    }
+                } else if (py::isinstance<py::int_>(inputs.begin()->first)) {
+                    auto inputs_map = Common::cast_to_tensor_index_map(inputs);
+                    for (auto&& input : inputs_map) {
+                        self._requests[handle]._request.set_input_tensor(input.first, input.second);
+                    }
+                }
+            }
+            // Now GIL can be released - we are NOT working with Python objects in this block
+            {
+                py::gil_scoped_release release;
+                self._requests[handle]._start_time = Time::now();
+                // Start InferRequest in asynchronous mode
+                self._requests[handle]._request.start_async();
+            }
+        },
+        py::arg("inputs"),
+        py::arg("userdata"));
+
+    cls.def("is_ready", [](AsyncInferQueue& self) {
+        return self._is_ready();
+    });
+
+    cls.def("wait_all", [](AsyncInferQueue& self) {
+        return self.waitAll();
+    });
+
+    cls.def("get_idle_request_id", [](AsyncInferQueue& self) {
+        return self.getIdleRequestId();
+    });
+
+    cls.def("set_infer_callback", [](AsyncInferQueue& self, py::function f_callback) {
+        self.setCustomCallbacks(f_callback);
+    });
+
+    cls.def("__len__", 
[](AsyncInferQueue& self) { + return self._requests.size(); + }); + + cls.def( + "__iter__", + [](AsyncInferQueue& self) { + return py::make_iterator(self._requests.begin(), self._requests.end()); + }, + py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ + + cls.def("__getitem__", [](AsyncInferQueue& self, size_t i) { + return self._requests[i]; + }); + + cls.def_property_readonly("userdata", [](AsyncInferQueue& self) { + return self._user_ids; + }); + } diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.hpp similarity index 77% rename from runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp rename to runtime/bindings/python/src/pyopenvino/core/async_infer_queue.hpp index 23aa72fd072496..3ed1122ba126d7 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.hpp @@ -7,4 +7,4 @@ namespace py = pybind11; -void regclass_InferQueue(py::module m); +void regclass_AsyncInferQueue(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/common.cpp b/runtime/bindings/python/src/pyopenvino/core/common.cpp index b42f4d14419594..8a15aaf6b92598 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.cpp @@ -321,13 +321,13 @@ void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& d } } -uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual) { +uint32_t get_optimal_number_of_requests(const ov::runtime::ExecutableNetwork& actual) { try { - auto parameter_value = actual.GetMetric(METRIC_KEY(SUPPORTED_METRICS)); + auto parameter_value = actual.get_metric(METRIC_KEY(SUPPORTED_METRICS)); auto supported_metrics = parameter_value.as>(); const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) { - parameter_value = actual.GetMetric(key); + parameter_value = actual.get_metric(key); if (parameter_value.is()) return parameter_value.as(); else diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp index d4be9bd2a77995..867330640f3cf2 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -15,6 +15,7 @@ #include "Python.h" #include "ie_common.h" #include "openvino/runtime/tensor.hpp" +#include "openvino/runtime/executable_network.hpp" #include "pyopenvino/core/containers.hpp" namespace py = pybind11; @@ -60,5 +61,5 @@ namespace Common void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); - uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual); + uint32_t get_optimal_number_of_requests(const ov::runtime::ExecutableNetwork& actual); }; // namespace Common diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp deleted file mode 100644 index e80cd33105f01b..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "pyopenvino/core/ie_infer_queue.hpp" - -#include -#include -#include - -#include -#include -#include 
-#include -#include -#include -#include -#include -#include - -#include "pyopenvino/core/common.hpp" -#include "pyopenvino/core/infer_request.hpp" - -#define INVALID_ID -1 - -namespace py = pybind11; - -class InferQueue { -public: - InferQueue(std::vector requests, - std::queue idle_handles, - std::vector user_ids) - : _requests(requests), - _idle_handles(idle_handles), - _user_ids(user_ids) { - this->setDefaultCallbacks(); - _last_id = -1; - } - - ~InferQueue() { - _requests.clear(); - } - - bool _is_ready() { - py::gil_scoped_release release; - std::unique_lock lock(_mutex); - _cv.wait(lock, [this] { - return !(_idle_handles.empty()); - }); - - return !(_idle_handles.empty()); - } - - py::dict _getIdleRequestInfo() { - py::gil_scoped_release release; - std::unique_lock lock(_mutex); - _cv.wait(lock, [this] { - return !(_idle_handles.empty()); - }); - - size_t request_id = _idle_handles.front(); - - py::dict request_info = py::dict(); - request_info["id"] = request_id; - // request_info["status"] = true; // TODO - - return request_info; - } - - size_t getIdleRequestId() { - // Wait for any of _idle_handles - py::gil_scoped_release release; - std::unique_lock lock(_mutex); - _cv.wait(lock, [this] { - return !(_idle_handles.empty()); - }); - - size_t idle_request_id = _idle_handles.front(); - _idle_handles.pop(); - - return idle_request_id; - } - - std::vector waitAll() { - // Wait for all requests to return with callback thus updating - // _idle_handles so it matches the size of requests - py::gil_scoped_release release; - std::unique_lock lock(_mutex); - _cv.wait(lock, [this] { - return _idle_handles.size() == _requests.size(); - }); - - std::vector statuses; - - for (size_t handle = 0; handle < _requests.size(); handle++) { - statuses.push_back(_requests[handle]._request.wait_for(std::chrono::milliseconds(0))); - } - - return statuses; - } - - void setDefaultCallbacks() { - for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.set_callback([this, handle /* ... 
*/](std::exception_ptr exception_ptr) { - _requests[handle]._end_time = Time::now(); - // Add idle handle to queue - _idle_handles.push(handle); - // Notify locks in getIdleRequestId() or waitAll() functions - _cv.notify_one(); - }); - } - } - - void setCustomCallbacks(py::function f_callback) { - for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.set_callback([this, f_callback, handle](std::exception_ptr exception_ptr) { - _requests[handle]._end_time = Time::now(); - try { - if (exception_ptr) { - std::rethrow_exception(exception_ptr); - } - } catch (const std::exception& e) { - IE_THROW() << "Caught exception: " << e.what(); - } - // Acquire GIL, execute Python function - py::gil_scoped_acquire acquire; - f_callback(_requests[handle], _user_ids[handle]); - // Add idle handle to queue - _idle_handles.push(handle); - // Notify locks in getIdleRequestId() or waitAll() functions - _cv.notify_one(); - }); - } - } - - std::vector _requests; - std::queue _idle_handles; - std::vector _user_ids; // user ID can be any Python object - size_t _last_id; - std::mutex _mutex; - std::condition_variable _cv; -}; - -// void regclass_InferQueue(py::module m) { -// py::class_> cls(m, "InferQueue"); - -// cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { -// if (jobs == 0) { -// const InferenceEngine::ExecutableNetwork& _net = net; -// jobs = (size_t)Common::get_optimal_number_of_requests(_net); -// } - -// std::vector requests; -// std::queue idle_handles; -// std::vector user_ids(jobs); - -// for (size_t handle = 0; handle < jobs; handle++) { -// auto request = InferRequestWrapper(net.CreateInferRequest()); -// // Get Inputs and Outputs info from executable network -// request._inputsInfo = net.GetInputsInfo(); -// request._outputsInfo = net.GetOutputsInfo(); - -// requests.push_back(request); -// idle_handles.push(handle); -// } - -// return new InferQueue(requests, idle_handles, user_ids); -// }), -// py::arg("network"), -// py::arg("jobs") = 0); - -// cls.def( -// "_async_infer", -// [](InferQueue& self, const py::dict inputs, py::object userdata) { -// // getIdleRequestId function has an intention to block InferQueue -// // until there is at least one idle (free to use) InferRequest -// auto handle = self.getIdleRequestId(); -// // Set new inputs label/id from user -// self._user_ids[handle] = userdata; -// // Update inputs of picked InferRequest -// if (!inputs.empty()) { -// Common::set_request_blobs(self._requests[handle]._request, inputs); -// } -// // Now GIL can be released - we are NOT working with Python objects in this block -// { -// py::gil_scoped_release release; -// self._requests[handle]._start_time = Time::now(); -// // Start InferRequest in asynchronus mode -// self._requests[handle]._request.start_async(); -// } -// }, -// py::arg("inputs"), -// py::arg("userdata")); - -// cls.def("is_ready", [](InferQueue& self) { -// return self._is_ready(); -// }); - -// cls.def("wait_all", [](InferQueue& self) { -// return self.waitAll(); -// }); - -// cls.def("get_idle_request_info", [](InferQueue& self) { -// return self._getIdleRequestInfo(); -// }); - -// cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { -// self.setCustomCallbacks(f_callback); -// }); - -// cls.def("__len__", [](InferQueue& self) { -// return self._requests.size(); -// }); - -// cls.def( -// "__iter__", -// [](InferQueue& self) { -// return py::make_iterator(self._requests.begin(), self._requests.end()); -// }, -// py::keep_alive<0, 
1>()); /* Keep set alive while iterator is used */ - -// cls.def("__getitem__", [](InferQueue& self, size_t i) { -// return self._requests[i]; -// }); - -// cls.def_property_readonly("userdata", [](InferQueue& self) { -// return self._user_ids; -// }); -// } diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index d0088018454005..1c00a438ff6f08 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -24,7 +24,7 @@ #include "pyopenvino/core/executable_network.hpp" #include "pyopenvino/core/ie_blob.hpp" #include "pyopenvino/core/ie_data.hpp" -#include "pyopenvino/core/ie_infer_queue.hpp" +#include "pyopenvino/core/async_infer_queue.hpp" #include "pyopenvino/core/ie_input_info.hpp" #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" @@ -127,7 +127,7 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_Version(m); regclass_Parameter(m); regclass_InputInfo(m); - // regclass_InferQueue(m); + regclass_AsyncInferQueue(m); regclass_ProfilingInfo(m); regclass_PreProcessInfo(m); diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 0d058b518fb451..7a9a0b0c2978aa 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -7,7 +7,7 @@ import datetime from ..conftest import image_path, model_path -from openvino import Core, Tensor, ProfilingInfo +from openvino import Core, AsyncInferQueue, Tensor, ProfilingInfo is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) @@ -166,3 +166,22 @@ def test_infer_mixed_keys(device): with pytest.raises(TypeError) as e: request.infer({0: tensor, "fc_out": tensor2}) assert "incompatible function arguments!" 
in str(e.value)
+
+
+def test_infer_queue(device):
+ jobs = 8
+ core = Core()
+ func = core.read_model(test_net_xml, test_net_bin)
+ exec_net = core.compile_model(func, device)
+ infer_queue = AsyncInferQueue(exec_net, jobs)
+
+ def callback(request, userdata):
+ userdata["finished"] = True
+
+ img = read_image()
+ infer_queue.set_infer_callback(callback)
+ assert infer_queue.is_ready()
+ for i in range(jobs):
+ infer_queue.start_async({"data": img}, {"finished": False})
+ infer_queue.wait_all()
+ assert all([data["finished"] for data in infer_queue.userdata])

From 90b49f9ca932d7d5bf050496b7827ef3573b7fee Mon Sep 17 00:00:00 2001
From: Alexey Lebedev
Date: Wed, 10 Nov 2021 17:30:25 +0300
Subject: [PATCH 27/46] fix code style

---
 .../src/pyopenvino/core/async_infer_queue.cpp | 160 +++++++++---------
 .../python/src/pyopenvino/pyopenvino.cpp | 2 +-
 2 files changed, 81 insertions(+), 81 deletions(-)

diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
index 7bb5fd7acb6370..9319dd2404f67e 100644
--- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
+++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
@@ -24,8 +24,8 @@ namespace py = pybind11;
 class AsyncInferQueue {
 public:
 AsyncInferQueue(std::vector requests,
- std::queue idle_handles,
- std::vector user_ids)
+ std::queue idle_handles,
+ std::vector user_ids)
 : _requests(requests),
 _idle_handles(idle_handles),
 _user_ids(user_ids) {
@@ -114,40 +114,40 @@ class AsyncInferQueue {
 };
 void regclass_AsyncInferQueue(py::module m) {
- py::class_> cls(m, "AsyncInferQueue");
-
- cls.def(py::init([](ov::runtime::ExecutableNetwork& net, size_t jobs) {
- if (jobs == 0) {
- jobs = (size_t)Common::get_optimal_number_of_requests(net);
- }
-
- std::vector requests;
- std::queue idle_handles;
- std::vector user_ids(jobs);
-
- for (size_t handle = 0; handle < jobs; handle++) {
- auto request = InferRequestWrapper(net.create_infer_request());
- // Get Inputs and Outputs info from executable network
- request._inputs = net.inputs();
- request._outputs = net.outputs();
-
- requests.push_back(request);
- idle_handles.push(handle);
- }
-
- return new AsyncInferQueue(requests, idle_handles, user_ids);
- }),
- py::arg("network"),
- py::arg("jobs") = 0);
-
- cls.def(
- "_start_async",
- [](AsyncInferQueue& self, const py::dict inputs, py::object userdata) {
- // getIdleRequestId function has an intention to block InferQueue
- // until there is at least one idle (free to use) InferRequest
- auto handle = self.getIdleRequestId();
- // Set new inputs label/id from user
- self._user_ids[handle] = userdata;
+ py::class_> cls(m, "AsyncInferQueue");
+
+ cls.def(py::init([](ov::runtime::ExecutableNetwork& net, size_t jobs) {
+ if (jobs == 0) {
+ jobs = (size_t)Common::get_optimal_number_of_requests(net);
+ }
+
+ std::vector requests;
+ std::queue idle_handles;
+ std::vector user_ids(jobs);
+
+ for (size_t handle = 0; handle < jobs; handle++) {
+ auto request = InferRequestWrapper(net.create_infer_request());
+ // Get Inputs and Outputs info from executable network
+ request._inputs = net.inputs();
+ request._outputs = net.outputs();
+
+ requests.push_back(request);
+ idle_handles.push(handle);
+ }
+
+ return new AsyncInferQueue(requests, idle_handles, user_ids);
+ }),
+ py::arg("network"),
+ py::arg("jobs") = 0);
+
+ cls.def(
+ "_start_async",
+ [](AsyncInferQueue& self, const py::dict inputs, py::object userdata) {
+ // getIdleRequestId function has an intention to block InferQueue 
+ // until there is at least one idle (free to use) InferRequest
+ auto handle = self.getIdleRequestId();
+ // Set new inputs label/id from user
+ self._user_ids[handle] = userdata;
 // Update inputs if there are any
 if (!inputs.empty()) {
 if (py::isinstance(inputs.begin()->first)) {
@@ -162,49 +162,49 @@ void regclass_AsyncInferQueue(py::module m) {
 }
 }
 }
- // Now GIL can be released - we are NOT working with Python objects in this block
+ // Now GIL can be released - we are NOT working with Python objects in this block
 {
- py::gil_scoped_release release;
- self._requests[handle]._start_time = Time::now();
- // Start InferRequest in asynchronous mode
- self._requests[handle]._request.start_async();
+ py::gil_scoped_release release;
+ self._requests[handle]._start_time = Time::now();
+ // Start InferRequest in asynchronous mode
+ self._requests[handle]._request.start_async();
 }
- },
- py::arg("inputs"),
- py::arg("userdata"));
-
- cls.def("is_ready", [](AsyncInferQueue& self) {
- return self._is_ready();
- });
-
- cls.def("wait_all", [](AsyncInferQueue& self) {
- return self.waitAll();
- });
-
- cls.def("get_idle_request_id", [](AsyncInferQueue& self) {
- return self.getIdleRequestId();
- });
-
- cls.def("set_infer_callback", [](AsyncInferQueue& self, py::function f_callback) {
- self.setCustomCallbacks(f_callback);
- });
-
- cls.def("__len__", [](AsyncInferQueue& self) {
- return self._requests.size();
- });
-
- cls.def(
- "__iter__",
- [](AsyncInferQueue& self) {
- return py::make_iterator(self._requests.begin(), self._requests.end());
- },
- py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */
-
- cls.def("__getitem__", [](AsyncInferQueue& self, size_t i) {
- return self._requests[i];
- });
-
- cls.def_property_readonly("userdata", [](AsyncInferQueue& self) {
- return self._user_ids;
- });
- }
+ },
+ py::arg("inputs"),
+ py::arg("userdata"));
+
+ cls.def("is_ready", [](AsyncInferQueue& self) {
+ return self._is_ready();
+ });
+
+ cls.def("wait_all", [](AsyncInferQueue& self) {
+ return self.waitAll();
+ });
+
+ cls.def("get_idle_request_id", [](AsyncInferQueue& self) {
+ return self.getIdleRequestId();
+ });
+
+ cls.def("set_infer_callback", [](AsyncInferQueue& self, py::function f_callback) {
+ self.setCustomCallbacks(f_callback);
+ });
+
+ cls.def("__len__", [](AsyncInferQueue& self) {
+ return self._requests.size();
+ });
+
+ cls.def(
+ "__iter__",
+ [](AsyncInferQueue& self) {
+ return py::make_iterator(self._requests.begin(), self._requests.end());
+ },
+ py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */
+
+ cls.def("__getitem__", [](AsyncInferQueue& self, size_t i) {
+ return self._requests[i];
+ });
+
+ cls.def_property_readonly("userdata", [](AsyncInferQueue& self) {
+ return self._user_ids;
+ });
+}
diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
index 1c00a438ff6f08..c1529428789db9 100644
--- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
+++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp
@@ -19,12 +19,12 @@
 #if defined(NGRAPH_ONNX_FRONTEND_ENABLE)
 # include "pyopenvino/graph/onnx_import/onnx_import.hpp"
 #endif
+#include "pyopenvino/core/async_infer_queue.hpp"
 #include "pyopenvino/core/containers.hpp"
 #include "pyopenvino/core/core.hpp"
 #include "pyopenvino/core/executable_network.hpp"
 #include "pyopenvino/core/ie_blob.hpp"
 #include "pyopenvino/core/ie_data.hpp"
-#include "pyopenvino/core/async_infer_queue.hpp"
 #include 
"pyopenvino/core/ie_input_info.hpp" #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" From 22b0a756ebf4dd3ad93812019f0578aa4dc492b4 Mon Sep 17 00:00:00 2001 From: jiwaszki Date: Wed, 10 Nov 2021 16:16:35 +0000 Subject: [PATCH 28/46] Hot-fix CPU plugin with precision --- inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp index af900f7170db91..32d1f8494ffab3 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp @@ -305,7 +305,7 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std:: desc.getShape().getRank())) : MemoryDescUtils::convertToTensorDesc(desc); const auto &tensorDesc = data->getTensorDesc(); - if (expectedTensorDesc.getPrecision() != tensorDesc.getPrecision()) { + if (expectedTensorDesc.getPrecision() != normalizeToSupportedPrecision(tensorDesc.getPrecision())) { IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name << " but expect blobs with different precision: " << tensorDesc.getPrecision() << " for input and " << expectedTensorDesc.getPrecision() From 1cbf144f077259aa2a85461fb026fea958169a79 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Thu, 11 Nov 2021 01:12:30 +0300 Subject: [PATCH 29/46] fix start_async --- .../bindings/python/src/openvino/ie_api.py | 8 +-- .../src/pyopenvino/core/infer_request.cpp | 51 +++++++++---------- .../src/pyopenvino/core/infer_request.hpp | 4 +- .../test_infer_request.py | 29 +++++++++-- 4 files changed, 56 insertions(+), 36 deletions(-) diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index af4d0e97f0625b..9199ccc1a273db 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -43,8 +43,8 @@ def normalize_inputs(py_dict: dict) -> dict: for k, v in py_dict.items()} # flake8: noqa: D102 -def infer(request: InferRequest, inputs: dict = None) -> np.ndarray: - res = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {})) +def infer(request: InferRequest, inputs: dict = {}) -> np.ndarray: + res = request._infer(inputs=normalize_inputs(inputs)) # Required to return list since np.ndarray forces all of tensors data to match in # dimensions. This results in errors when running ops like variadic split. return [copy.deepcopy(tensor.data) for tensor in res] @@ -57,8 +57,8 @@ def infer_new_request(exec_net: ExecutableNetwork, inputs: dict = None) -> List[ return [copy.deepcopy(tensor.data) for tensor in res] # flake8: noqa: D102 -def start_async(request: InferRequest, inputs: dict = None) -> None: # type: ignore - request._start_async(inputs=normalize_inputs(inputs if inputs is not None else {})) +def start_async(request: InferRequest, inputs: dict = {}, userdata: dict = None) -> None: # type: ignore + request._start_async(inputs=normalize_inputs(inputs), userdata=userdata) # flake8: noqa: C901 # Dispatch Blob types on Python side. 
diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 358ffc0c153569..31a22431db8ae7 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -81,9 +81,7 @@ void regclass_InferRequest(py::module m) { cls.def( "_start_async", - [](InferRequestWrapper& self, const py::dict& inputs) { - py::gil_scoped_release release; - + [](InferRequestWrapper& self, const py::dict& inputs, py::object& userdata) { // Update inputs if there are any if (!inputs.empty()) { if (py::isinstance(inputs.begin()->first)) { @@ -98,28 +96,22 @@ void regclass_InferRequest(py::module m) { } } } - // TODO: check for None so next async infer userdata can be updated - // if (!userdata.empty()) - // { - // if (user_callback_defined) - // { - // self._request.SetCompletionCallback([self, userdata]() { - // // py::gil_scoped_acquire acquire; - // auto statusCode = const_cast(self).Wait( - // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - // self._request.user_callback(self, statusCode, userdata); - // // py::gil_scoped_release release; - // }); - // } - // else - // { - // py::print("There is no callback function!"); - // } - // } + if (userdata != py::none()) { + if (self.user_callback_defined) { + self.userdata = userdata; + } + else { + PyErr_WarnEx(PyExc_RuntimeWarning, + "There is no callback function!", + 1); + } + } + py::gil_scoped_release release; self._start_time = Time::now(); self._request.start_async(); }, - py::arg("inputs")); + py::arg("inputs"), + py::arg("userdata")); cls.def("cancel", [](InferRequestWrapper& self) { self._request.cancel(); @@ -140,7 +132,9 @@ void regclass_InferRequest(py::module m) { cls.def( "set_callback", - [](InferRequestWrapper& self, py::function f_callback) { + [](InferRequestWrapper& self, py::function f_callback, py::object& userdata) { + self.userdata = userdata; + self.user_callback_defined = true; self._request.set_callback([&self, f_callback](std::exception_ptr exception_ptr) { self._end_time = Time::now(); try { @@ -148,14 +142,15 @@ void regclass_InferRequest(py::module m) { std::rethrow_exception(exception_ptr); } } catch (const std::exception& e) { - IE_THROW() << "Caught exception: " << e.what(); + throw ov::Exception("Caught exception: " + std::string(e.what())); } // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; - f_callback(exception_ptr); + f_callback(self.userdata); }); }, - py::arg("f_callback")); + py::arg("f_callback"), + py::arg("userdata")); cls.def( "get_tensor", @@ -258,6 +253,10 @@ void regclass_InferRequest(py::module m) { return self._request.get_profiling_info(); }); + cls.def_property_readonly("userdata", [](InferRequestWrapper& self) { + return self.userdata; + }); + cls.def_property_readonly("inputs", [](InferRequestWrapper& self) { return self._inputs; }); diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp index e81261a54a608c..3ea9859db1fcc8 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp @@ -28,8 +28,8 @@ class InferRequestWrapper { } // ~InferRequestWrapper() = default; - // bool user_callback_defined; - // py::function user_callback; + bool user_callback_defined = false; + py::object userdata; double get_latency() { auto execTime = 
std::chrono::duration_cast(_end_time - _start_time); diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 0d058b518fb451..98d48b99fbcb1c 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -5,6 +5,7 @@ import os import pytest import datetime +import time from ..conftest import image_path, model_path from openvino import Core, Tensor, ProfilingInfo @@ -133,10 +134,6 @@ def test_cancel(device): img = read_image() request = exec_net.create_infer_request() - def callback(e): - raise Exception(e) - - request.set_callback(callback) request.start_async({0: img}) request.cancel() with pytest.raises(RuntimeError) as e: @@ -150,6 +147,30 @@ def callback(e): assert "[ INFER_CANCELLED ]" in str(e.value) +def test_start_async(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) + img = read_image() + jobs = 3 + requests = [] + for i in range(jobs): + requests.append(exec_net.create_infer_request()) + + def callback(callbacks_info): + time.sleep(0.01) + callbacks_info['finished'] += 1 + + callbacks_info = {} + callbacks_info['finished'] = 0 + for request in requests: + request.set_callback(callback, callbacks_info) + request.start_async({0: img}) + for request in requests: + request.wait() + assert callbacks_info['finished'] == jobs + + def test_infer_mixed_keys(device): core = Core() func = core.read_model(test_net_xml, test_net_bin) From 530d164cdc4b39019e2b15cd5f26c35b3f06c812 Mon Sep 17 00:00:00 2001 From: Victor Kuznetsov Date: Wed, 10 Nov 2021 16:07:08 +0300 Subject: [PATCH 30/46] add performance hint to time infer (#8480) --- tests/time_tests/src/timetests/timetest_infer.cpp | 3 +++ tests/time_tests/src/timetests/timetest_infer_cache.cpp | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/time_tests/src/timetests/timetest_infer.cpp b/tests/time_tests/src/timetests/timetest_infer.cpp index 83f47f837e1690..e4870ec500c4be 100644 --- a/tests/time_tests/src/timetests/timetest_infer.cpp +++ b/tests/time_tests/src/timetests/timetest_infer.cpp @@ -3,6 +3,7 @@ // #include +#include #include #include "common_utils.h" @@ -29,6 +30,8 @@ int runPipeline(const std::string &model, const std::string &device) { { SCOPED_TIMER(load_plugin); ie.GetVersions(device); + // enables performance hint for specified device + ie.SetConfig({{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}}, device); } { SCOPED_TIMER(create_exenetwork); diff --git a/tests/time_tests/src/timetests/timetest_infer_cache.cpp b/tests/time_tests/src/timetests/timetest_infer_cache.cpp index f1c657f6aa463f..0b0ee0c28e1c7c 100644 --- a/tests/time_tests/src/timetests/timetest_infer_cache.cpp +++ b/tests/time_tests/src/timetests/timetest_infer_cache.cpp @@ -3,6 +3,7 @@ // #include +#include #include #include "common_utils.h" @@ -31,7 +32,8 @@ int runPipeline(const std::string &model, const std::string &device) { } { SCOPED_TIMER(load_network); - ie.SetConfig({{"CACHE_DIR", "models_cache"}}); + // enables cache + ie.SetConfig({{CONFIG_KEY(CACHE_DIR), "models_cache"}}); exeNetwork = ie.LoadNetwork(model, device); } { From ae7219943cd728ab60037b07aad0046a210483ab Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 10 Nov 2021 16:27:23 +0300 Subject: [PATCH 31/46] Updated common migration pipeline (#8176) * Updated common 
migration pipeline
* Fixed merge issue
* Added new model and extended example
* Fixed typo
* Added v10-v11 comparison
---
 .../docs/common_inference_pipeline.md | 108 +++++++++++++++++-
 .../docs/graph_construction.md | 12 ++
 docs/migration_ov_2_0/docs/intro.md | 1 +
 docs/snippets/ie_common.cpp | 36 +++++-
 docs/snippets/ngraph.cpp | 48 ++++++++
 docs/snippets/ov_common.cpp | 75 ++++++++++--
 docs/snippets/ov_graph.cpp | 47 ++++++++
 ngraph/core/include/openvino/core/version.hpp | 1 -
 8 files changed, 313 insertions(+), 15 deletions(-)
 create mode 100644 docs/migration_ov_2_0/docs/graph_construction.md
 create mode 100644 docs/snippets/ngraph.cpp
 create mode 100644 docs/snippets/ov_graph.cpp

diff --git a/docs/migration_ov_2_0/docs/common_inference_pipeline.md b/docs/migration_ov_2_0/docs/common_inference_pipeline.md
index af2dbf25304116..2b842e2e9e125a 100644
--- a/docs/migration_ov_2_0/docs/common_inference_pipeline.md
+++ b/docs/migration_ov_2_0/docs/common_inference_pipeline.md
@@ -32,6 +32,8 @@ OpenVINO™ 2.0 API:
 
 @snippet snippets/ov_common.cpp ov_api_2_0:read_model
 
+The read model has the same structure as the example in the [OpenVINO™ Graph Construction](@ref ov_graph_construction) guide.
+
 ### 2.1 Configure Input and Output of the Model
 
 Inference Engine API:
 
@@ -52,4 +54,108 @@ OpenVINO™ 2.0 API:
 
 @snippet snippets/ov_common.cpp ov_api_2_0:compile_model
 
-## 5. TBD
+## 4. Create an Inference Request
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:create_infer_request
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:create_infer_request
+
+## 5. Prepare Input
+
+### IR v10
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_v10
+
+### IR v11
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
+### ONNX
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
+### From Function
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_input_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
+
+## 6. Start Inference
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:inference
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:inference
+
+
+## 7. Process the Inference Results
+
+### IR v10
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_v10
+
+### IR v11
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
+### ONNX
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
+### From Function
+
+Inference Engine API:
+
+@snippet snippets/ie_common.cpp ie:get_output_tensor
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
+
diff --git a/docs/migration_ov_2_0/docs/graph_construction.md b/docs/migration_ov_2_0/docs/graph_construction.md
new file mode 100644
index 00000000000000..f3fe2c7a1dce34
--- /dev/null
+++ b/docs/migration_ov_2_0/docs/graph_construction.md
@@ -0,0 +1,12 @@
+# OpenVINO™ graph construction {#ov_graph_construction}
+
+OpenVINO™ 2.0 includes the nGraph engine as part of its common library. The `ngraph` namespace was changed to `ov`.
+The code snippets below show how application code should be changed to migrate to OpenVINO™ 2.0.
+
+nGraph API:
+
+@snippet snippets/ngraph.cpp ngraph:graph
+
+OpenVINO™ 2.0 API:
+
+@snippet snippets/ov_graph.cpp ov:graph
diff --git a/docs/migration_ov_2_0/docs/intro.md b/docs/migration_ov_2_0/docs/intro.md
index 5d89b7aff3d809..5afc4a11304ffb 100644
--- a/docs/migration_ov_2_0/docs/intro.md
+++ b/docs/migration_ov_2_0/docs/intro.md
@@ -9,4 +9,5 @@ The list of differences between the APIs is below:
 - Namespaces were aligned between components.
 
 Please look at the next transition guides to understand how to migrate your application to OpenVINO™ API 2.0.
+ - [OpenVINO™ Graph Construction](@ref ov_graph_construction)
 - [OpenVINO™ Common Inference pipeline](@ref ov_inference_pipeline)
diff --git a/docs/snippets/ie_common.cpp b/docs/snippets/ie_common.cpp
index 6a558129243082..25cf78a0c43582 100644
--- a/docs/snippets/ie_common.cpp
+++ b/docs/snippets/ie_common.cpp
@@ -27,8 +27,29 @@ int main() {
 //! [ie:create_infer_request]
 
 //! [ie:get_input_tensor]
- InferenceEngine::Blob::Ptr input_blob = infer_request.GetBlob(inputs.begin()->first);
- // fill input blob
+ InferenceEngine::Blob::Ptr input_blob1 = infer_request.GetBlob(inputs.begin()->first);
+ // fill first blob
+ InferenceEngine::SizeVector dims1 = input_blob1->getTensorDesc().getDims();
+ InferenceEngine::MemoryBlob::Ptr minput1 = InferenceEngine::as(input_blob1);
+ if (minput1) {
+ // locked memory holder should be alive all time while access to its
+ // buffer happens
+ auto minputHolder = minput1->wmap();
+ // Original I64 precision was converted to I32
+ auto data = minputHolder.as::value_type*>();
+ // Fill data ...
+ }
+ InferenceEngine::Blob::Ptr input_blob2 = infer_request.GetBlob("data2");
+ // fill second blob
+ InferenceEngine::MemoryBlob::Ptr minput2 = InferenceEngine::as(input_blob2);
+ if (minput2) {
+ // locked memory holder should be alive all time while access to its
+ // buffer happens
+ auto minputHolder = minput2->wmap();
+ // Original I64 precision was converted to I32
+ auto data = minputHolder.as::value_type*>();
+ // Fill data ...
+ }
 //! [ie:get_input_tensor]
 
 //! [ie:inference]
@@ -37,7 +58,16 @@ int main() {
 //! [ie:get_output_tensor]
 InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
- // process output data
+ InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as(output_blob);
+ if (moutput) {
+ // locked memory holder should be alive all time while access to its
+ // buffer happens
+ auto minputHolder = moutput->rmap();
+ // Original I64 precision was converted to I32
+ auto data =
+ minputHolder.as::value_type*>();
+ // process output data
+ }
 //! [ie:get_output_tensor]
 return 0;
}
diff --git a/docs/snippets/ngraph.cpp b/docs/snippets/ngraph.cpp
new file mode 100644
index 00000000000000..931140f99d9519
--- /dev/null
+++ b/docs/snippets/ngraph.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include
+#include
+
+int main() {
+ //! [ngraph:graph]
+ // _____________ _____________
+ // | Parameter | | Parameter |
+ // | data1 | | data2 |
+ // |___________| |___________|
+ // | |
+ // data1_t | | data2_t
+ // \ /
+ // \ /
+ // \ /
+ // ____\____/____
+ // | Concat |
+ // | concat |
+ // |____________|
+ // |
+ // | concat_t
+ // |
+ // _______|_______
+ // | Result |
+ // | result |
+ // |_____________|
+ auto data1 = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 2, 2});
+ data1->set_friendly_name("data1"); // operation name
+ data1->output(0).set_names({"data1_t"}); // tensor names
+ auto data2 = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 2, 2, 2});
+ data2->set_friendly_name("data2"); // operation name
+ data2->output(0).set_names({"data2_t"}); // tensor names
+
+ auto concat = std::make_shared(ngraph::OutputVector{data1, data2}, 1);
+ concat->set_friendly_name("concat"); // operation name
+ concat->output(0).set_names({"concat_t"}); // tensor name
+
+ auto result = std::make_shared(concat);
+ result->set_friendly_name("result"); // operation name
+
+ auto f = std::make_shared(ngraph::ResultVector{result},
+ ngraph::ParameterVector{data1, data2},
+ "function_name");
+ //! [ngraph:graph]
+ return 0;
+}
diff --git a/docs/snippets/ov_common.cpp b/docs/snippets/ov_common.cpp
index 7cb9e344f7cbaa..1392e3a509850f 100644
--- a/docs/snippets/ov_common.cpp
+++ b/docs/snippets/ov_common.cpp
@@ -1,9 +1,61 @@
 // Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include
 #include
 
+void inputs_v10(ov::runtime::InferRequest& infer_request) {
+ //! [ov_api_2_0:get_input_tensor_v10]
+ // Get input tensor by index
+ ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
+ // IR v10 works with converted precisions (i64 -> i32)
+ auto data1 = input_tensor1.data();
+ // Fill first data ...
+
+ // Get input tensor by tensor name
+ ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
+ // IR v10 works with converted precisions (i64 -> i32)
+ auto data2 = input_tensor2.data();
+ // Fill second data ...
+ //! [ov_api_2_0:get_input_tensor_v10]
+}
+
+void inputs_aligned(ov::runtime::InferRequest& infer_request) {
+ //! [ov_api_2_0:get_input_tensor_aligned]
+ // Get input tensor by index
+ ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
+ // Element types, names and layouts are aligned with framework
+ auto data1 = input_tensor1.data();
+ // Fill first data ...
+
+ // Get input tensor by tensor name
+ ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
+ // Element types, names and layouts are aligned with framework
+ auto data2 = input_tensor2.data();
+ // Fill second data ...
+ //! [ov_api_2_0:get_input_tensor_aligned]
+}
+
+void outputs_v10(ov::runtime::InferRequest& infer_request) {
+ //! [ov_api_2_0:get_output_tensor_v10]
+ // model has only one output
+ ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
+ // IR v10 works with converted precisions (i64 -> i32)
+ auto out_data = output_tensor.data();
+ // process output data
+ //! [ov_api_2_0:get_output_tensor_v10]
+}
+
+void outputs_aligned(ov::runtime::InferRequest& infer_request) {
+ //! [ov_api_2_0:get_output_tensor_aligned]
+ // model has only one output
+ ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
+ // Element types, names and layouts are aligned with framework
+ auto out_data = output_tensor.data();
+ // process output data
+ //! [ov_api_2_0:get_output_tensor_aligned]
+}
+
 int main() {
 //! [ov_api_2_0:create_core]
 ov::runtime::Core core;
@@ -14,21 +66,24 @@ int main() {
 //! [ov_api_2_0:read_model]
 
 //! [ov_api_2_0:get_inputs_outputs]
- ov::ParameterVector inputs = network->get_parameters();
- ov::ResultVector outputs = network->get_results();
+ std::vector> inputs = network->inputs();
+ std::vector> outputs = network->outputs();
 //! [ov_api_2_0:get_inputs_outputs]
 
 //! [ov_api_2_0:compile_model]
 ov::runtime::ExecutableNetwork exec_network = core.compile_model(network, "CPU");
 //! [ov_api_2_0:compile_model]
 
+ //! [ov_api_2_0:create_infer_request]
 ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
- //
- // InferenceEngine::Blob::Ptr input_blob = infer_request.GetBlob(inputs.begin()->first);
- // // fill input blob
- // infer_request.Infer();
- //
- // InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
- // process output data
+ //! [ov_api_2_0:create_infer_request]
+
+ inputs_aligned(infer_request);
+ //! [ov_api_2_0:inference]
+ infer_request.infer();
+ //! [ov_api_2_0:inference]
+
+ outputs_aligned(infer_request);
+
 return 0;
}
diff --git a/docs/snippets/ov_graph.cpp b/docs/snippets/ov_graph.cpp
new file mode 100644
index 00000000000000..b47abce44514cb
--- /dev/null
+++ b/docs/snippets/ov_graph.cpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include
+#include
+
+int main() {
+ //! 
[ov:graph] + // _____________ _____________ + // | Parameter | | Parameter | + // | data1 | | data2 | + // |___________| |___________| + // | | + // data1_t | | data2_t + // \ / + // \ / + // \ / + // ____\____/____ + // | Concat | + // | concat | + // |____________| + // | + // | concat_t + // | + // _______|_______ + // | Result | + // | result | + // |_____________| + auto data1 = std::make_shared(ov::element::i64, ov::Shape{1, 3, 2, 2}); + data1->set_friendly_name("data1"); // operation name + data1->output(0).set_names({"data1_t"}); // tensor names + auto data2 = std::make_shared(ov::element::i64, ov::Shape{1, 2, 2, 2}); + data2->set_friendly_name("data2"); // operation name + data2->output(0).set_names({"data2_t"}); // tensor names + + auto concat = std::make_shared(ov::OutputVector{data1, data2}, 1); + concat->set_friendly_name("concat"); // operation name + concat->output(0).set_names({"concat_t"}); // tensor name + + auto result = std::make_shared(concat); + result->set_friendly_name("result"); // operation name + + auto f = + std::make_shared(ov::ResultVector{result}, ov::ParameterVector{data1, data2}, "function_name"); + //! [ov:graph] + return 0; +} diff --git a/ngraph/core/include/openvino/core/version.hpp b/ngraph/core/include/openvino/core/version.hpp index 97f82366f9705c..368398ba129352 100644 --- a/ngraph/core/include/openvino/core/version.hpp +++ b/ngraph/core/include/openvino/core/version.hpp @@ -1,7 +1,6 @@ // Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // - #pragma once #include "openvino/core/core_visibility.hpp" From f55e3b28d282645f35e9d586f2302dc3ee6e2553 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Wed, 10 Nov 2021 14:50:58 +0100 Subject: [PATCH 32/46] Avoid redundant graph nodes scans (#8415) --- .../frontend/src/detail/subgraph_extraction.cpp | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp b/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp index 6e689a68465a83..fe3a523c1054c9 100644 --- a/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp +++ b/ngraph/frontend/onnx/frontend/src/detail/subgraph_extraction.cpp @@ -78,6 +78,10 @@ bool is_graph_initializer(const ONNX_NAMESPACE::GraphProto& graph, const std::st int find_source_node_idx(const ONNX_NAMESPACE::GraphProto& graph, const int current_node_idx, const std::string& input_name) { + // Some operators (e.g. 
Clip) have optional inputs + if (input_name.empty()) + return -1; + for (int i = current_node_idx - 1; i >= 0; --i) { const auto& outputs = graph.node(i).output(); const auto output_found = std::any_of(std::begin(outputs), std::end(outputs), is_equal_to(input_name)); @@ -329,11 +333,11 @@ void SubgraphExtractor::extract_subgraph(std::vector subgraph_output SubgraphExtractor::SubgraphComponents SubgraphExtractor::discover_output_contributors( const OutputEdge& output_edge, const SubgraphComponents& already_collected) const { - const auto already_visited = [&already_collected](const int node_index) { - return already_collected.nodes.count(node_index) > 0; + SubgraphComponents output_contributors; + const auto already_visited = [&already_collected, &output_contributors](const int node_index) { + return already_collected.nodes.count(node_index) > 0 || output_contributors.nodes.count(node_index) > 0; }; - SubgraphComponents output_contributors; const auto tensor_name = get_output_tensor_name(m_onnx_graph, output_edge); output_contributors.outputs.insert(tensor_name); @@ -357,7 +361,7 @@ SubgraphExtractor::SubgraphComponents SubgraphExtractor::discover_output_contrib // and/or keep looking for more contributors further up in the graph // when an input or initializer is reached, the visitor stops the lookup - const auto n_inputs = m_node_inputs[n]; + const auto& n_inputs = m_node_inputs[n]; for (auto& input_name : n_inputs) { if (is_graph_input(m_onnx_graph, input_name)) { output_contributors.inputs.insert(input_name); @@ -371,7 +375,9 @@ SubgraphExtractor::SubgraphComponents SubgraphExtractor::discover_output_contrib } else { // if an edge points to another node (source node) it should be visited // in one of the future iterations - nodes_to_visit.push(find_source_node_idx(m_onnx_graph, n, input_name)); + const auto node_idx = find_source_node_idx(m_onnx_graph, n, input_name); + if (node_idx >= 0) + nodes_to_visit.push(node_idx); } } } @@ -391,6 +397,7 @@ std::vector SubgraphExtractor::all_output_edges() const { for (const auto& graph_output : m_onnx_graph.output()) { const auto node_index = find_source_node_idx(m_onnx_graph, m_onnx_graph.node_size(), graph_output.name()); + // no need to test node_index against -1 as graph output name must not be empty const auto& node_outputs = m_onnx_graph.node(node_index).output(); const auto output_port_it = std::find(std::begin(node_outputs), std::end(node_outputs), graph_output.name()); all_outputs.emplace_back(node_index, output_port_it - std::begin(node_outputs)); From 372a424cd3b718b08d3d3fd0cf9436ded50d1f44 Mon Sep 17 00:00:00 2001 From: Victor Kuznetsov Date: Wed, 10 Nov 2021 16:59:58 +0300 Subject: [PATCH 33/46] Refactor work with env variables (#8208) * del MO_ROOT * del MO_ROOT from common_utils.py * add MO_PATH to common_utils.py * change mo_path --- tests/layer_tests/common/constants.py | 8 -------- tests/layer_tests/common/utils/common_utils.py | 14 +++++++------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/tests/layer_tests/common/constants.py b/tests/layer_tests/common/constants.py index cf570fc40c2d88..26e6942d791db7 100644 --- a/tests/layer_tests/common/constants.py +++ b/tests/layer_tests/common/constants.py @@ -4,14 +4,6 @@ import os -if 'MO_ROOT' in os.environ: - mo_bin = os.environ['MO_ROOT'] - if not os.path.exists(mo_bin): - raise EnvironmentError( - "Environment variable MO_ROOT points to non existing path {}".format(mo_bin)) -else: - raise EnvironmentError("MO_ROOT variable is not set") - if 
os.environ.get('OUTPUT_DIR') is not None: out_path = os.environ['OUTPUT_DIR'] else: diff --git a/tests/layer_tests/common/utils/common_utils.py b/tests/layer_tests/common/utils/common_utils.py index f92daf51c26bcc..e5fc5d93f3773b 100644 --- a/tests/layer_tests/common/utils/common_utils.py +++ b/tests/layer_tests/common/utils/common_utils.py @@ -2,24 +2,24 @@ # SPDX-License-Identifier: Apache-2.0 import logging -import os import subprocess import sys +from pathlib import Path +import mo import numpy as np - logger = logging.getLogger(__name__) def generate_ir(coverage=False, **kwargs): - # Get default mo args - mo = os.path.join(os.environ.get("MO_ROOT"), "mo.py") + mo_path = Path(mo.__file__).parent + mo_runner = mo_path.joinpath('main.py').as_posix() if coverage: - params = [sys.executable, '-m', 'coverage', 'run', '-p', '--source={}'.format(os.environ.get("MO_ROOT")), - '--omit=*_test.py', mo] + params = [sys.executable, '-m', 'coverage', 'run', '-p', '--source={}'.format(mo_path.parent), + '--omit=*_test.py', mo_runner] else: - params = [sys.executable, mo] + params = [sys.executable, mo_runner] for key, value in kwargs.items(): if key == "batch": params.extend(("-b", str(value))) From 3c31285e125299ce070eb33d0a2fd65a087fe7f3 Mon Sep 17 00:00:00 2001 From: Dmitry Pigasin Date: Wed, 10 Nov 2021 17:31:28 +0300 Subject: [PATCH 34/46] [IE Sample Scripts] Use cmake to build samples (#8442) * Use cmake to build samples * Add the option to set custom build output folder --- scripts/demo/run_sample_benchmark_app.bat | 83 ++++------------------- scripts/demo/run_sample_benchmark_app.sh | 13 ++-- scripts/demo/run_sample_squeezenet.bat | 81 ++++------------------ scripts/demo/run_sample_squeezenet.sh | 12 +++- 4 files changed, 47 insertions(+), 142 deletions(-) diff --git a/scripts/demo/run_sample_benchmark_app.bat b/scripts/demo/run_sample_benchmark_app.bat index b9499bcca4e076..18f78faea58d70 100644 --- a/scripts/demo/run_sample_benchmark_app.bat +++ b/scripts/demo/run_sample_benchmark_app.bat @@ -4,12 +4,18 @@ @echo off setlocal enabledelayedexpansion +set ROOT_DIR=%~dp0 + set TARGET=CPU set BUILD_FOLDER=%USERPROFILE%\Documents\Intel\OpenVINO :: command line arguments parsing :input_arguments_loop if not "%1"=="" ( + if "%1"=="-b" ( + set BUILD_FOLDER=%2 + shift + ) if "%1"=="-d" ( set TARGET=%2 echo target = !TARGET! @@ -25,6 +31,7 @@ if not "%1"=="" ( echo. echo Options: echo -help Print help message + echo -b BUILD_FOLDER Specify the sample build directory echo -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified echo -sample-options OPTIONS Specify command line arguments for the sample exit /b @@ -33,12 +40,12 @@ if not "%1"=="" ( goto :input_arguments_loop ) +set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" + IF "%SAMPLE_OPTIONS%"=="" ( set SAMPLE_OPTIONS=-niter 1000 ) -set ROOT_DIR=%~dp0 - set TARGET_PRECISION=FP16 echo target_precision = !TARGET_PRECISION! @@ -158,81 +165,21 @@ if "%PROCESSOR_ARCHITECTURE%" == "AMD64" ( set "PLATFORM=Win32" ) -set VSWHERE="false" -if exist "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" ( - set VSWHERE="true" - cd /d "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer" -) else if exist "%ProgramFiles%\Microsoft Visual Studio\Installer\vswhere.exe" ( - set VSWHERE="true" - cd /d "%ProgramFiles%\Microsoft Visual Studio\Installer" -) else ( - echo "vswhere tool is not found" -) - -if !VSWHERE! 
== "true" ( - for /f "usebackq tokens=*" %%i in (`vswhere -latest -products * -requires Microsoft.Component.MSBuild -property installationPath`) do ( - set VS_PATH=%%i - ) - if exist "!VS_PATH!\MSBuild\14.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\14.0\Bin\MSBuild.exe" - ) - if exist "!VS_PATH!\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\15.0\Bin\MSBuild.exe" - ) - if exist "!VS_PATH!\MSBuild\Current\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\Current\Bin\MSBuild.exe" - ) - for /f "usebackq tokens=1 delims=." %%i in (`vswhere -latest -products * -requires Microsoft.Component.MSBuild -property installationVersion`) do ( - set VS_MAJOR_VER=%%i - ) - if "!VS_MAJOR_VER!"=="16" set "MSBUILD_VERSION=16 2019" - if "!VS_MAJOR_VER!"=="15" set "MSBUILD_VERSION=15 2017" -) - -if "!MSBUILD_BIN!" == "" ( - if exist "C:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=14 2015" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) -) else ( - if not "!MSBUILD_BIN:2019=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=16 2019" - if not "!MSBUILD_BIN:2017=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=15 2017" - if not "!MSBUILD_BIN:2015=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=14 2015" -) - -if "!MSBUILD_BIN!" == "" ( - echo Build tools for Visual Studio 2015 / 2017 / 2019 cannot be found. If you use Visual Studio 2017, please download and install build tools from https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017 - GOTO errorHandling -) - -set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" - -echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... && ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" + +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio 16 2019" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 echo. -echo ###############^|^| Build Inference Engine samples using MS Visual Studio (MSBuild.exe) ^|^|############### +echo ###############^|^| Build Inference Engine samples using cmake ^|^|############### echo. + CALL :delay 3 -echo "!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:cpp_samples\benchmark_app /clp:ErrorsOnly /m -"!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:cpp_samples\benchmark_app /clp:ErrorsOnly /m +echo cmake --build . --config Release --target benchmark_app +cmake --build . 
--config Release --target benchmark_app if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/run_sample_benchmark_app.sh b/scripts/demo/run_sample_benchmark_app.sh index 9e273d6c2a515e..d7b1077afcca76 100755 --- a/scripts/demo/run_sample_benchmark_app.sh +++ b/scripts/demo/run_sample_benchmark_app.sh @@ -6,6 +6,7 @@ echo -ne "\e[0;33mWARNING: If you get an error when running the sample in the Docker container, you may need to install additional packages. To do this, run the container as root (-u 0) and run install_openvino_dependencies.sh script. If you get a package-independent error, try setting additional parameters using -sample-options.\e[0m\n" ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )" +build_dir="$HOME/inference_engine_cpp_samples_build" . "$ROOT_DIR/utils.sh" @@ -14,6 +15,7 @@ usage() { echo echo "Options:" echo " -help Print help message" + echo " -b BUILD_DIR Specify the sample build directory" echo " -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. Sample will look for a suitable plugin for device specified" echo " -sample-options OPTIONS Specify command line arguments for the sample" echo @@ -30,6 +32,10 @@ do key="$1" case $key in + -b | --build_dir) + build_dir="$2/inference_engine_cpp_samples_build" + shift + ;; -h | -help | --help) usage ;; @@ -58,9 +64,9 @@ target_precision="FP16" echo -ne "target_precision = ${target_precision}\n" -models_path="$HOME/openvino_models/models" -models_cache="$HOME/openvino_models/cache" -irs_path="$HOME/openvino_models/ir" +models_path="$build_dir/../openvino_models/models" +models_cache="$build_dir/../openvino_models/cache" +irs_path="$build_dir/../openvino_models/ir" model_name="squeezenet1.1" @@ -163,7 +169,6 @@ if [ "$OS_PATH" == "x86_64" ]; then fi samples_path="${INTEL_OPENVINO_DIR}/samples/cpp" -build_dir="$HOME/inference_engine_cpp_samples_build" binaries_dir="${build_dir}/${OS_PATH}/Release" if [ -e "$build_dir/CMakeCache.txt" ]; then diff --git a/scripts/demo/run_sample_squeezenet.bat b/scripts/demo/run_sample_squeezenet.bat index 293168cb9e710d..2ccb9e7c18b022 100644 --- a/scripts/demo/run_sample_squeezenet.bat +++ b/scripts/demo/run_sample_squeezenet.bat @@ -4,12 +4,18 @@ @echo off setlocal enabledelayedexpansion +set ROOT_DIR=%~dp0 + set TARGET=CPU set BUILD_FOLDER=%USERPROFILE%\Documents\Intel\OpenVINO :: command line arguments parsing :input_arguments_loop if not "%1"=="" ( + if "%1"=="-b" ( + set BUILD_FOLDER=%2 + shift + ) if "%1"=="-d" ( set TARGET=%2 echo target = !TARGET! @@ -25,6 +31,7 @@ if not "%1"=="" ( echo. echo Options: echo -help Print help message + echo -b BUILD_FOLDER Specify the sample build directory echo -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. 
Sample will look for a suitable plugin for device specified echo -sample-options OPTIONS Specify command line arguments for the sample exit /b @@ -33,7 +40,7 @@ if not "%1"=="" ( goto :input_arguments_loop ) -set ROOT_DIR=%~dp0 +set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" set TARGET_PRECISION=FP16 @@ -154,81 +161,21 @@ if "%PROCESSOR_ARCHITECTURE%" == "AMD64" ( set "PLATFORM=Win32" ) -set VSWHERE="false" -if exist "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" ( - set VSWHERE="true" - cd /d "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer" -) else if exist "%ProgramFiles%\Microsoft Visual Studio\Installer\vswhere.exe" ( - set VSWHERE="true" - cd /d "%ProgramFiles%\Microsoft Visual Studio\Installer" -) else ( - echo "vswhere tool is not found" -) - -if !VSWHERE! == "true" ( - for /f "usebackq tokens=*" %%i in (`vswhere -latest -products * -requires Microsoft.Component.MSBuild -property installationPath`) do ( - set VS_PATH=%%i - ) - if exist "!VS_PATH!\MSBuild\14.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\14.0\Bin\MSBuild.exe" - ) - if exist "!VS_PATH!\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\15.0\Bin\MSBuild.exe" - ) - if exist "!VS_PATH!\MSBuild\Current\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=!VS_PATH!\MSBuild\Current\Bin\MSBuild.exe" - ) - for /f "usebackq tokens=1 delims=." %%i in (`vswhere -latest -products * -requires Microsoft.Component.MSBuild -property installationVersion`) do ( - set VS_MAJOR_VER=%%i - ) - if "!VS_MAJOR_VER!"=="16" set "MSBUILD_VERSION=16 2019" - if "!VS_MAJOR_VER!"=="15" set "MSBUILD_VERSION=15 2017" -) - -if "!MSBUILD_BIN!" == "" ( - if exist "C:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=14 2015" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) - if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" ( - set "MSBUILD_BIN=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" - set "MSBUILD_VERSION=15 2017" - ) -) else ( - if not "!MSBUILD_BIN:2019=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=16 2019" - if not "!MSBUILD_BIN:2017=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=15 2017" - if not "!MSBUILD_BIN:2015=!"=="!MSBUILD_BIN!" set "MSBUILD_VERSION=14 2015" -) - -if "!MSBUILD_BIN!" == "" ( - echo Build tools for Visual Studio 2015 / 2017 / 2019 cannot be found. If you use Visual Studio 2017, please download and install build tools from https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017 - GOTO errorHandling -) - -set "SOLUTION_DIR64=%BUILD_FOLDER%\inference_engine_cpp_samples_build" - -echo Creating Visual Studio !MSBUILD_VERSION! %PLATFORM% files in %SOLUTION_DIR64%... 
&& ^ if exist "%SOLUTION_DIR64%\CMakeCache.txt" del "%SOLUTION_DIR64%\CMakeCache.txt" -cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio !MSBUILD_VERSION!" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" + +cd /d "%INTEL_OPENVINO_DIR%\samples\cpp" && cmake -E make_directory "%SOLUTION_DIR64%" && cd /d "%SOLUTION_DIR64%" && cmake -G "Visual Studio 16 2019" -A %PLATFORM% "%INTEL_OPENVINO_DIR%\samples\cpp" if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 echo. -echo ###############^|^| Build Inference Engine samples using MS Visual Studio (MSBuild.exe) ^|^|############### +echo ###############^|^| Build Inference Engine samples using cmake ^|^|############### echo. + CALL :delay 3 -echo "!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:cpp_samples\classification_sample_async /clp:ErrorsOnly /m -"!MSBUILD_BIN!" Samples.sln /p:Configuration=Release /t:cpp_samples\classification_sample_async /clp:ErrorsOnly /m +echo cmake --build . --config Release --target classification_sample_async +cmake --build . --config Release --target classification_sample_async if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 diff --git a/scripts/demo/run_sample_squeezenet.sh b/scripts/demo/run_sample_squeezenet.sh index 3d1b12a1fec6fc..1ce3a61cb9d5c7 100755 --- a/scripts/demo/run_sample_squeezenet.sh +++ b/scripts/demo/run_sample_squeezenet.sh @@ -6,6 +6,7 @@ echo -ne "\e[0;33mWARNING: If you get an error when running the sample in the Docker container, you may need to install additional packages. To do this, run the container as root (-u 0) and run install_openvino_dependencies.sh script. If you get a package-independent error, try setting additional parameters using -sample-options.\e[0m\n" ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )" +build_dir="$HOME/inference_engine_cpp_samples_build" . "$ROOT_DIR/utils.sh" @@ -14,6 +15,7 @@ usage() { echo echo "Options:" echo " -help Print help message" + echo " -b BUILD_DIR Specify the sample build directory" echo " -d DEVICE Specify the target device to infer on; CPU, GPU, HDDL or MYRIAD are acceptable. 
Sample will look for a suitable plugin for device specified" echo " -sample-options OPTIONS Specify command line arguments for the sample" echo @@ -30,6 +32,10 @@ do key="$1" case $key in + -b | --build_dir) + build_dir="$2/inference_engine_cpp_samples_build" + shift + ;; -h | -help | --help) usage ;; @@ -54,9 +60,9 @@ target_precision="FP16" echo -ne "target_precision = ${target_precision}\n" -models_path="$HOME/openvino_models/models" -models_cache="$HOME/openvino_models/cache" -irs_path="$HOME/openvino_models/ir" +models_path="$build_dir/../openvino_models/models" +models_cache="$build_dir/../openvino_models/cache" +irs_path="$build_dir/../openvino_models/ir" model_name="squeezenet1.1" From f069af7d45b0d0c65cce2ca0b2faf5bd893343ed Mon Sep 17 00:00:00 2001 From: Artur Kulikowski Date: Wed, 10 Nov 2021 15:47:12 +0100 Subject: [PATCH 35/46] Remove opset8 from compatibility ngraph python API (#8452) --- runtime/bindings/python/setup.py | 1 - .../src/compatibility/ngraph/__init__.py | 311 ++++++++------- .../compatibility/ngraph/opset8/__init__.py | 161 -------- .../src/compatibility/ngraph/opset8/ops.py | 369 ------------------ .../ngraph/utils/node_factory.py | 2 +- .../compatibility/pyngraph/node_factory.cpp | 1 - .../test_ngraph/test_adaptive_pool.py | 63 --- .../test_ngraph/test_create_op.py | 243 +++++------- .../test_ngraph/test_pooling.py | 189 ++------- .../test_ngraph/test_random_uniform.py | 27 -- 10 files changed, 282 insertions(+), 1085 deletions(-) delete mode 100644 runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py delete mode 100644 runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py delete mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py delete mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py diff --git a/runtime/bindings/python/setup.py b/runtime/bindings/python/setup.py index aebc98676d6625..83c84174e6e1bf 100644 --- a/runtime/bindings/python/setup.py +++ b/runtime/bindings/python/setup.py @@ -33,7 +33,6 @@ "ngraph.opset5", "ngraph.opset6", "ngraph.opset7", - "ngraph.opset8", "ngraph.utils", "ngraph.impl", "ngraph.impl.op", diff --git a/runtime/bindings/python/src/compatibility/ngraph/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/__init__.py index 8b12a3c7ff9d82..a19e06c7a1a486 100644 --- a/runtime/bindings/python/src/compatibility/ngraph/__init__.py +++ b/runtime/bindings/python/src/compatibility/ngraph/__init__.py @@ -27,164 +27,159 @@ from ngraph.helpers import function_from_cnn from ngraph.helpers import function_to_cnn from ngraph.helpers import partial_shape_from_data -from ngraph.opset8 import absolute -from ngraph.opset8 import absolute as abs -from ngraph.opset8 import acos -from ngraph.opset8 import acosh -from ngraph.opset8 import adaptive_avg_pool -from ngraph.opset8 import adaptive_max_pool -from ngraph.opset8 import add -from ngraph.opset8 import asin -from ngraph.opset8 import asinh -from ngraph.opset8 import assign -from ngraph.opset8 import atan -from ngraph.opset8 import atanh -from ngraph.opset8 import avg_pool -from ngraph.opset8 import batch_norm_inference -from ngraph.opset8 import batch_to_space -from ngraph.opset8 import binary_convolution -from ngraph.opset8 import broadcast -from ngraph.opset8 import bucketize -from ngraph.opset8 import ceiling -from ngraph.opset8 import ceiling as ceil -from ngraph.opset8 import clamp -from ngraph.opset8 import concat -from ngraph.opset8 import constant -from ngraph.opset8 import 
convert -from ngraph.opset8 import convert_like -from ngraph.opset8 import convolution -from ngraph.opset8 import convolution_backprop_data -from ngraph.opset8 import cos -from ngraph.opset8 import cosh -from ngraph.opset8 import ctc_greedy_decoder -from ngraph.opset8 import ctc_greedy_decoder_seq_len -from ngraph.opset8 import ctc_loss -from ngraph.opset8 import cum_sum -from ngraph.opset8 import cum_sum as cumsum -from ngraph.opset8 import deformable_convolution -from ngraph.opset8 import deformable_psroi_pooling -from ngraph.opset8 import depth_to_space -from ngraph.opset8 import detection_output -from ngraph.opset8 import dft -from ngraph.opset8 import divide -from ngraph.opset8 import einsum -from ngraph.opset8 import elu -from ngraph.opset8 import embedding_bag_offsets_sum -from ngraph.opset8 import embedding_bag_packed_sum -from ngraph.opset8 import embedding_segments_sum -from ngraph.opset8 import extract_image_patches -from ngraph.opset8 import equal -from ngraph.opset8 import erf -from ngraph.opset8 import exp -from ngraph.opset8 import fake_quantize -from ngraph.opset8 import floor -from ngraph.opset8 import floor_mod -from ngraph.opset8 import gather -from ngraph.opset8 import gather_elements -from ngraph.opset8 import gather_nd -from ngraph.opset8 import gather_tree -from ngraph.opset8 import gelu -from ngraph.opset8 import greater -from ngraph.opset8 import greater_equal -from ngraph.opset8 import grn -from ngraph.opset8 import group_convolution -from ngraph.opset8 import group_convolution_backprop_data -from ngraph.opset8 import gru_cell -from ngraph.opset8 import gru_sequence -from ngraph.opset8 import hard_sigmoid -from ngraph.opset8 import hsigmoid -from ngraph.opset8 import hswish -from ngraph.opset8 import idft -from ngraph.opset8 import interpolate -from ngraph.opset8 import less -from ngraph.opset8 import less_equal -from ngraph.opset8 import log -from ngraph.opset8 import logical_and -from ngraph.opset8 import logical_not -from ngraph.opset8 import logical_or -from ngraph.opset8 import logical_xor -from ngraph.opset8 import log_softmax -from ngraph.opset8 import loop -from ngraph.opset8 import lrn -from ngraph.opset8 import lstm_cell -from ngraph.opset8 import lstm_sequence -from ngraph.opset8 import matmul -from ngraph.opset8 import matrix_nms -from ngraph.opset8 import max_pool -from ngraph.opset8 import maximum -from ngraph.opset8 import minimum -from ngraph.opset8 import mish -from ngraph.opset8 import mod -from ngraph.opset8 import multiclass_nms -from ngraph.opset8 import multiply -from ngraph.opset8 import mvn -from ngraph.opset8 import negative -from ngraph.opset8 import non_max_suppression -from ngraph.opset8 import non_zero -from ngraph.opset8 import normalize_l2 -from ngraph.opset8 import not_equal -from ngraph.opset8 import one_hot -from ngraph.opset8 import pad -from ngraph.opset8 import parameter -from ngraph.opset8 import power -from ngraph.opset8 import prelu -from ngraph.opset8 import prior_box -from ngraph.opset8 import prior_box_clustered -from ngraph.opset8 import psroi_pooling -from ngraph.opset8 import proposal -from ngraph.opset8 import random_uniform -from ngraph.opset8 import range -from ngraph.opset8 import read_value -from ngraph.opset8 import reduce_l1 -from ngraph.opset8 import reduce_l2 -from ngraph.opset8 import reduce_logical_and -from ngraph.opset8 import reduce_logical_or -from ngraph.opset8 import reduce_max -from ngraph.opset8 import reduce_mean -from ngraph.opset8 import reduce_min -from ngraph.opset8 import reduce_prod -from 
ngraph.opset8 import reduce_sum -from ngraph.opset8 import region_yolo -from ngraph.opset8 import reorg_yolo -from ngraph.opset8 import relu -from ngraph.opset8 import reshape -from ngraph.opset8 import result -from ngraph.opset8 import reverse_sequence -from ngraph.opset8 import rnn_cell -from ngraph.opset8 import rnn_sequence -from ngraph.opset8 import roi_align -from ngraph.opset8 import roi_pooling -from ngraph.opset8 import roll -from ngraph.opset8 import round -from ngraph.opset8 import scatter_elements_update -from ngraph.opset8 import scatter_update -from ngraph.opset8 import select -from ngraph.opset8 import selu -from ngraph.opset8 import shape_of -from ngraph.opset8 import shuffle_channels -from ngraph.opset8 import sigmoid -from ngraph.opset8 import sign -from ngraph.opset8 import sin -from ngraph.opset8 import sinh -from ngraph.opset8 import softmax -from ngraph.opset8 import softplus -from ngraph.opset8 import space_to_batch -from ngraph.opset8 import space_to_depth -from ngraph.opset8 import split -from ngraph.opset8 import sqrt -from ngraph.opset8 import squared_difference -from ngraph.opset8 import squeeze -from ngraph.opset8 import strided_slice -from ngraph.opset8 import subtract -from ngraph.opset8 import swish -from ngraph.opset8 import tan -from ngraph.opset8 import tanh -from ngraph.opset8 import tensor_iterator -from ngraph.opset8 import tile -from ngraph.opset8 import topk -from ngraph.opset8 import transpose -from ngraph.opset8 import unsqueeze -from ngraph.opset8 import variadic_split +from ngraph.opset7 import absolute +from ngraph.opset7 import absolute as abs +from ngraph.opset7 import acos +from ngraph.opset7 import acosh +from ngraph.opset7 import add +from ngraph.opset7 import asin +from ngraph.opset7 import asinh +from ngraph.opset7 import assign +from ngraph.opset7 import atan +from ngraph.opset7 import atanh +from ngraph.opset7 import avg_pool +from ngraph.opset7 import batch_norm_inference +from ngraph.opset7 import batch_to_space +from ngraph.opset7 import binary_convolution +from ngraph.opset7 import broadcast +from ngraph.opset7 import bucketize +from ngraph.opset7 import ceiling +from ngraph.opset7 import ceiling as ceil +from ngraph.opset7 import clamp +from ngraph.opset7 import concat +from ngraph.opset7 import constant +from ngraph.opset7 import convert +from ngraph.opset7 import convert_like +from ngraph.opset7 import convolution +from ngraph.opset7 import convolution_backprop_data +from ngraph.opset7 import cos +from ngraph.opset7 import cosh +from ngraph.opset7 import ctc_greedy_decoder +from ngraph.opset7 import ctc_greedy_decoder_seq_len +from ngraph.opset7 import ctc_loss +from ngraph.opset7 import cum_sum +from ngraph.opset7 import cum_sum as cumsum +from ngraph.opset7 import deformable_convolution +from ngraph.opset7 import deformable_psroi_pooling +from ngraph.opset7 import depth_to_space +from ngraph.opset7 import detection_output +from ngraph.opset7 import dft +from ngraph.opset7 import divide +from ngraph.opset7 import einsum +from ngraph.opset7 import elu +from ngraph.opset7 import embedding_bag_offsets_sum +from ngraph.opset7 import embedding_bag_packed_sum +from ngraph.opset7 import embedding_segments_sum +from ngraph.opset7 import extract_image_patches +from ngraph.opset7 import equal +from ngraph.opset7 import erf +from ngraph.opset7 import exp +from ngraph.opset7 import fake_quantize +from ngraph.opset7 import floor +from ngraph.opset7 import floor_mod +from ngraph.opset7 import gather +from ngraph.opset7 import gather_elements 
+from ngraph.opset7 import gather_nd +from ngraph.opset7 import gather_tree +from ngraph.opset7 import gelu +from ngraph.opset7 import greater +from ngraph.opset7 import greater_equal +from ngraph.opset7 import grn +from ngraph.opset7 import group_convolution +from ngraph.opset7 import group_convolution_backprop_data +from ngraph.opset7 import gru_cell +from ngraph.opset7 import gru_sequence +from ngraph.opset7 import hard_sigmoid +from ngraph.opset7 import hsigmoid +from ngraph.opset7 import hswish +from ngraph.opset7 import idft +from ngraph.opset7 import interpolate +from ngraph.opset7 import less +from ngraph.opset7 import less_equal +from ngraph.opset7 import log +from ngraph.opset7 import logical_and +from ngraph.opset7 import logical_not +from ngraph.opset7 import logical_or +from ngraph.opset7 import logical_xor +from ngraph.opset7 import log_softmax +from ngraph.opset7 import loop +from ngraph.opset7 import lrn +from ngraph.opset7 import lstm_cell +from ngraph.opset7 import lstm_sequence +from ngraph.opset7 import matmul +from ngraph.opset7 import max_pool +from ngraph.opset7 import maximum +from ngraph.opset7 import minimum +from ngraph.opset7 import mish +from ngraph.opset7 import mod +from ngraph.opset7 import multiply +from ngraph.opset7 import mvn +from ngraph.opset7 import negative +from ngraph.opset7 import non_max_suppression +from ngraph.opset7 import non_zero +from ngraph.opset7 import normalize_l2 +from ngraph.opset7 import not_equal +from ngraph.opset7 import one_hot +from ngraph.opset7 import pad +from ngraph.opset7 import parameter +from ngraph.opset7 import power +from ngraph.opset7 import prelu +from ngraph.opset7 import prior_box +from ngraph.opset7 import prior_box_clustered +from ngraph.opset7 import psroi_pooling +from ngraph.opset7 import proposal +from ngraph.opset7 import range +from ngraph.opset7 import read_value +from ngraph.opset7 import reduce_l1 +from ngraph.opset7 import reduce_l2 +from ngraph.opset7 import reduce_logical_and +from ngraph.opset7 import reduce_logical_or +from ngraph.opset7 import reduce_max +from ngraph.opset7 import reduce_mean +from ngraph.opset7 import reduce_min +from ngraph.opset7 import reduce_prod +from ngraph.opset7 import reduce_sum +from ngraph.opset7 import region_yolo +from ngraph.opset7 import reorg_yolo +from ngraph.opset7 import relu +from ngraph.opset7 import reshape +from ngraph.opset7 import result +from ngraph.opset7 import reverse_sequence +from ngraph.opset7 import rnn_cell +from ngraph.opset7 import rnn_sequence +from ngraph.opset7 import roi_align +from ngraph.opset7 import roi_pooling +from ngraph.opset7 import roll +from ngraph.opset7 import round +from ngraph.opset7 import scatter_elements_update +from ngraph.opset7 import scatter_update +from ngraph.opset7 import select +from ngraph.opset7 import selu +from ngraph.opset7 import shape_of +from ngraph.opset7 import shuffle_channels +from ngraph.opset7 import sigmoid +from ngraph.opset7 import sign +from ngraph.opset7 import sin +from ngraph.opset7 import sinh +from ngraph.opset7 import softmax +from ngraph.opset7 import softplus +from ngraph.opset7 import space_to_batch +from ngraph.opset7 import space_to_depth +from ngraph.opset7 import split +from ngraph.opset7 import sqrt +from ngraph.opset7 import squared_difference +from ngraph.opset7 import squeeze +from ngraph.opset7 import strided_slice +from ngraph.opset7 import subtract +from ngraph.opset7 import swish +from ngraph.opset7 import tan +from ngraph.opset7 import tanh +from ngraph.opset7 import 
tensor_iterator +from ngraph.opset7 import tile +from ngraph.opset7 import topk +from ngraph.opset7 import transpose +from ngraph.opset7 import unsqueeze +from ngraph.opset7 import variadic_split # Extend Node class to support binary operators diff --git a/runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py deleted file mode 100644 index f0d0dfdd2dbf64..00000000000000 --- a/runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset5.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops 
import idft -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset8.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset5.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from 
ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py deleted file mode 100644 index 6c355930b7c021..00000000000000 --- a/runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from functools import partial -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset8 = partial(_get_node_factory, "opset8") - - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def deformable_convolution( - data: NodeInput, - offsets: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - mask: Optional[NodeInput] = None, - auto_pad: str = "EXPLICIT", - group: int = 1, - deformable_group: int = 1, - bilinear_interpolation_pad: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs deformable convolution operation. - - @param data: The node providing data batch tensor. - @param offsets: The node providing offset tensor. - @param filters: The node providing filters tensor. - @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - @param pads_begin: The number of pixels to add to the beginning along each axis. - @param pads_end: The number of pixels to add to the end along each axis. - @param dilations: The distance in width and height between elements (weights) in the filter. - @param mask: The node providing modulation scalar (mask) tensor. - @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - @param group: The number of groups which both output and input should be split into. - @param deformable_group: The number of groups which deformable values and output should be split - into along the channel axis. - @param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation - execution. - @param name: The optional new name for output node. - @return New node performing deformable convolution operation. 
- """ - if mask is None: - inputs = as_nodes(data, offsets, filters) - else: - inputs = as_nodes(data, offsets, filters, mask) - - return _get_node_factory_opset8().create( - "DeformableConvolution", - inputs, - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad, - "group": group, - "deformable_group": deformable_group, - "bilinear_interpolation_pad": bilinear_interpolation_pad - }, - ) - - -@nameable_op -def adaptive_avg_pool( - data: NodeInput, - output_shape: NodeInput -) -> Node: - """Return a node which performs AdaptiveAvgPool operation. - - @param data: The list of input nodes - @param output_shape: the shape of spatial dimentions after operation - @return: The new node performing AdaptiveAvgPool operation on the data - """ - inputs = as_nodes(data, output_shape) - return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs) - - -@nameable_op -def adaptive_max_pool( - data: NodeInput, - output_shape: NodeInput, - index_element_type: str = "i64" -) -> Node: - """Return a node which performs AdaptiveMaxPool operation. - - @param data: The list of input nodes - @param output_shape: the shape of spatial dimentions after operation - @param index_element_type: Type of indices output. - @return: The new node performing AdaptiveMaxPool operation on the data - """ - inputs = as_nodes(data, output_shape) - - attributes = { - "index_element_type": index_element_type, - } - - return _get_node_factory_opset8().create("AdaptiveMaxPool", inputs, attributes) - - -@nameable_op -def multiclass_nms( - boxes: NodeInput, - scores: NodeInput, - sort_result_type: str = "none", - sort_result_across_batch: bool = False, - output_type: str = "i64", - iou_threshold: float = 0.0, - score_threshold: float = 0.0, - nms_top_k: int = -1, - keep_top_k: int = -1, - background_class: int = -1, - nms_eta: float = 1.0, - normalized: bool = True -) -> Node: - """Return a node which performs MulticlassNms. - - @param boxes: Tensor with box coordinates. - @param scores: Tensor with box scores. - @param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
- @param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes - across batches or not - @param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32' - @param iou_threshold: Specifies intersection over union threshold - @param score_threshold: Specifies minimum score to consider box for the processing - @param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning - to keep all boxes - @param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 - meaning to keep all boxes - @param background_class: Specifies the background class id, -1 meaning to keep all classes - @param nms_eta: Specifies eta parameter for adpative NMS, in close range [0, 1.0] - @param normalized: Specifies whether boxes are normalized or not - @return: The new node which performs MuticlassNms - """ - inputs = as_nodes(boxes, scores) - - attributes = { - "sort_result_type": sort_result_type, - "sort_result_across_batch": sort_result_across_batch, - "output_type": output_type, - "iou_threshold": iou_threshold, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "keep_top_k": keep_top_k, - "background_class": background_class, - "nms_eta": nms_eta, - "normalized": normalized - } - - return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes) - - -@nameable_op -def matrix_nms( - boxes: NodeInput, - scores: NodeInput, - sort_result_type: str = "none", - sort_result_across_batch: bool = False, - output_type: str = "i64", - score_threshold: float = 0.0, - nms_top_k: int = -1, - keep_top_k: int = -1, - background_class: int = -1, - decay_function: str = "linear", - gaussian_sigma: float = 2.0, - post_threshold: float = 0.0, - normalized: bool = True -) -> Node: - """Return a node which performs MatrixNms. - - @param boxes: Tensor with box coordinates. - @param scores: Tensor with box scores. - @param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
- @param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes - across batches or not - @param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32' - @param score_threshold: Specifies minimum score to consider box for the processing - @param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning - to keep all boxes - @param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 - meaning to keep all boxes - @param background_class: Specifies the background class id, -1 meaning to keep all classes - @param decay_function: Specifies decay function used to decay scores, possible values: - 'gaussian', 'linear' - @param gaussian_sigma: Specifies gaussian_sigma parameter for gaussian decay_function - @param post_threshold: Specifies threshold to filter out boxes with low confidence score - after decaying - @param normalized: Specifies whether boxes are normalized or not - @return: The new node which performs MatrixNms - """ - inputs = as_nodes(boxes, scores) - - attributes = { - "sort_result_type": sort_result_type, - "sort_result_across_batch": sort_result_across_batch, - "output_type": output_type, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "keep_top_k": keep_top_k, - "background_class": background_class, - "decay_function": decay_function, - "gaussian_sigma": gaussian_sigma, - "post_threshold": post_threshold, - "normalized": normalized - } - - return _get_node_factory_opset8().create("MatrixNms", inputs, attributes) - - -@nameable_op -def gather( - data: NodeInput, - indices: NodeInput, - axis: NodeInput, - batch_dims: Optional[int] = 0, -) -> Node: - """Return a node which performs Gather with support of negative indices. - - @param data: N-D tensor with data for gathering - @param indices: N-D tensor with indices by which data is gathered. Negative indices - indicate reverse indexing from the end - @param axis: axis along which elements are gathered - @param batch_dims: number of batch dimensions - @return: The new node which performs Gather - """ - inputs = as_nodes(data, indices, axis) - attributes = { - "batch_dims": batch_dims - } - return _get_node_factory_opset8().create("Gather", inputs, attributes) - - -@nameable_op -def max_pool( - data: NodeInput, - strides: List[int], - dilations: List[int], - pads_begin: List[int], - pads_end: List[int], - kernel_shape: TensorShape, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - index_element_type: Optional[str] = "i64", - axis: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Perform max pooling operation and return both values and indices of the selected elements. - - @param data: The node providing input data. - @param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - @param dilations: The dilation of filter elements(distance between elements). - @param pads_begin: The number of pixels to add at the beginning along each axis. - @param pads_end: The number of pixels to add at the end along each axis. - @param kernel_shape: The pooling operation kernel shape. - @param rounding_type: Determines used rounding schema when computing output shape. - Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'. - @param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid']. Defaults to None. 
- @param index_element_type: The data type used for the indices output of this operator. - Defaults to i64. - @param axis: The first dimension in the data shape used to determine the maximum - returned index value. The value is the product of all dimensions - starting at the provided axis. Defaults to 0. - @param name: The optional name for the created output node. - - @return The new node performing max pooling operation. - """ - if auto_pad is None: - auto_pad = "explicit" - return _get_node_factory_opset8().create( - "MaxPool", - [as_node(data)], - { - "strides": strides, - "dilations": dilations, - "pads_begin": pads_begin, - "pads_end": pads_end, - "kernel": kernel_shape, - "rounding_type": rounding_type.upper(), - "auto_pad": auto_pad.upper(), - "index_element_type": index_element_type, - "axis": axis, - }, - ) - - -@nameable_op -def random_uniform( - output_shape: NodeInput, - min_val: NodeInput, - max_val: NodeInput, - output_type: str, - global_seed: int = 0, - op_seed: int = 0 -) -> Node: - """Return a node which generates sequence of random values from uniform distribution. - - @param output_shape: Tensor with shape of the output tensor. - @param min_val: Tensor with the lower bound on the range of random values to generate. - @param max_val: Tensor with the upper bound on the range of random values to generate. - @param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'. - @param global_seed: Specifies global seed value. Required to be a positive integer or 0. - @param op_seed: Specifies operational seed value. Required to be a positive integer or 0. - @return The new node which performs generation of random values from uniform distribution. - """ - inputs = as_nodes(output_shape, min_val, max_val) - - if global_seed < 0: - raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed)) - - if op_seed < 0: - raise RuntimeError("op_seed should be positive or 0. 
Got: {}".format(op_seed)) - - attributes = { - "output_type": output_type, - "global_seed": global_seed, - "op_seed": op_seed, - } - return _get_node_factory_opset8().create("RandomUniform", inputs, attributes) diff --git a/runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py b/runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py index ffb0c3d861ccc8..83cb9e094b0887 100644 --- a/runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py +++ b/runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py @@ -12,7 +12,7 @@ from ngraph.exceptions import UserInputError -DEFAULT_OPSET = "opset8" +DEFAULT_OPSET = "opset7" class NodeFactory(object): diff --git a/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp index e93e3ce30bbac1..e7d7ee2aa563e3 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp @@ -82,7 +82,6 @@ class NodeFactory { {"opset5", OpsetFunction(ngraph::get_opset5)}, {"opset6", OpsetFunction(ngraph::get_opset6)}, {"opset7", OpsetFunction(ngraph::get_opset7)}, - {"opset8", OpsetFunction(ngraph::get_opset8)}, }; auto it = s_opsets.find(opset_ver); diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py deleted file mode 100644 index ba7fe7b28f3508..00000000000000 --- a/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py +++ /dev/null @@ -1,63 +0,0 @@ -import ngraph as ng -import numpy as np -from tests_compatibility.runtime import get_runtime - - -def test_adaptive_avg_pool(): - runtime = get_runtime() - input = np.reshape([0.0, 4, 1, 3, -2, -5, -2, - -2, 1, -3, 1, -3, -4, 0, - -2, 1, -1, -2, 3, -1, -3, - - -1, -2, 3, 4, -3, -4, 1, - 2, 0, -4, -5, -2, -2, -3, - 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) - input_tensor = ng.constant(input) - output_shape = ng.constant(np.array([3], dtype=np.int32)) - - adaptive_pool_node = ng.adaptive_avg_pool(input_tensor, output_shape) - computation = runtime.computation(adaptive_pool_node) - adaptive_pool_results = computation() - expected_results = np.reshape([1.66666663, 0.66666669, -3., - -1.33333337, -1.66666663, -2.33333325, - -0.66666669, 0., -0.33333334, - - 0., 1.33333337, -2., - -0.66666669, -3.66666675, -2.33333325, - 2., -0.66666669, -1.33333337], (2, 3, 3)) - - assert np.allclose(adaptive_pool_results, expected_results) - - -def test_adaptive_max_pool(): - runtime = get_runtime() - input = np.reshape([0, 4, 1, 3, -2, -5, -2, - -2, 1, -3, 1, -3, -4, 0, - -2, 1, -1, -2, 3, -1, -3, - - -1, -2, 3, 4, -3, -4, 1, - 2, 0, -4, -5, -2, -2, -3, - 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) - input_tensor = ng.constant(input) - output_shape = ng.constant(np.array([3], dtype=np.int32)) - - adaptive_pool_node = ng.adaptive_max_pool(input_tensor, output_shape) - computation = runtime.computation(adaptive_pool_node) - adaptive_pool_results = computation() - expected_results = np.reshape([4, 3, -2, - 1, 1, 0, - 1, 3, 3, - - 3, 4, 1, - 2, -2, -2, - 3, 2, 2], (2, 3, 3)) - - expected_indices = np.reshape([1, 3, 4, - 1, 3, 6, - 1, 4, 4, - - 2, 3, 6, - 0, 4, 4, - 1, 4, 4], (2, 3, 3)) - - assert np.allclose(adaptive_pool_results, [expected_results, expected_indices]) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py 
b/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py index 673d7a2ebf10b4..d8cc5d8a19e11c 100644 --- a/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from _pyngraph import PartialShape, Dimension +from _pyngraph import PartialShape import ngraph as ng import ngraph.opset1 as ng_opset1 @@ -23,33 +23,6 @@ ] -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_adaptive_avg_pool(dtype): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) - - node = ng.adaptive_avg_pool(data, output_shape) - - assert node.get_type_name() == "AdaptiveAvgPool" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 24, 16, 16] - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -@pytest.mark.parametrize("ind_type", ["i32", "i64"]) -def test_adaptive_max_pool(dtype, ind_type): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) - - node = ng.adaptive_max_pool(data, output_shape, ind_type) - - assert node.get_type_name() == "AdaptiveMaxPool" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [2, 24, 16, 16] - assert list(node.get_output_shape(1)) == [2, 24, 16, 16] - assert node.get_output_element_type(1) == Type.i32 if ind_type == "i32" else Type.i64 - - @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_binary_convolution(dtype): strides = np.array([1, 1]) @@ -67,7 +40,14 @@ def test_binary_convolution(dtype): parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) node = ng.binary_convolution( - parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value, + parameter_input0, + parameter_input1, + strides, + pads_begin, + pads_end, + dilations, + mode, + pad_value, ) assert node.get_type_name() == "BinaryConvolution" @@ -91,26 +71,30 @@ def test_ctc_greedy_decoder(dtype): assert list(node.get_output_shape(0)) == expected_shape -@pytest.mark.parametrize("fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index", - [ - (np.float32, np.int32, "i32", "i32", True, True), - (np.float32, np.int32, "i64", "i32", True, True), - (np.float32, np.int32, "i32", "i64", True, True), - (np.float32, np.int32, "i64", "i64", True, True), - (np.float64, np.int64, "i32", "i32", False, True), - (np.float64, np.int64, "i64", "i32", False, True), - (np.float64, np.int64, "i32", "i64", False, True), - (np.float64, np.int64, "i64", "i64", False, True), - (np.float32, np.int32, "i32", "i32", True, False), - (np.float32, np.int32, "i64", "i32", True, False), - (np.float32, np.int32, "i32", "i64", True, False), - (np.float32, np.int32, "i64", "i64", True, False), - (np.float64, np.int64, "i32", "i32", False, False), - (np.float64, np.int64, "i64", "i32", False, False), - (np.float64, np.int64, "i32", "i64", False, False), - (np.float64, np.int64, "i64", "i64", False, False) - ],) -def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index): +@pytest.mark.parametrize( + "fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index", + [ + (np.float32, np.int32, "i32", "i32", True, True), + (np.float32, np.int32, "i64", "i32", True, True), + (np.float32, np.int32, "i32", "i64", True, True), + 
(np.float32, np.int32, "i64", "i64", True, True), + (np.float64, np.int64, "i32", "i32", False, True), + (np.float64, np.int64, "i64", "i32", False, True), + (np.float64, np.int64, "i32", "i64", False, True), + (np.float64, np.int64, "i64", "i64", False, True), + (np.float32, np.int32, "i32", "i32", True, False), + (np.float32, np.int32, "i64", "i32", True, False), + (np.float32, np.int32, "i32", "i64", True, False), + (np.float32, np.int32, "i64", "i64", True, False), + (np.float64, np.int64, "i32", "i32", False, False), + (np.float64, np.int64, "i64", "i32", False, False), + (np.float64, np.int64, "i32", "i64", False, False), + (np.float64, np.int64, "i64", "i64", False, False), + ], +) +def test_ctc_greedy_decoder_seq_len( + fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index +): input0_shape = [8, 20, 128] input1_shape = [8] input2_shape = [1] @@ -123,7 +107,12 @@ def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_r parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=int_dtype) node = ng.ctc_greedy_decoder_seq_len( - parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl + parameter_input0, + parameter_input1, + parameter_input2, + merge_repeated, + int_ci, + int_sl, ) assert node.get_type_name() == "CTCGreedyDecoderSeqLen" @@ -148,7 +137,13 @@ def test_deformable_convolution_opset1(dtype): parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) node = ng_opset1.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, + parameter_input0, + parameter_input1, + parameter_input2, + strides, + pads_begin, + pads_end, + dilations, ) assert node.get_type_name() == "DeformableConvolution" @@ -173,35 +168,13 @@ def test_deformable_convolution(dtype): parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) node = ng.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, - ) - - assert node.get_type_name() == "DeformableConvolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_deformable_convolution_mask(dtype): - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - - input0_shape = [1, 1, 9, 9] - input1_shape = [1, 18, 7, 7] - input2_shape = [1, 1, 3, 3] - input3_shape = [1, 9, 7, 7] - expected_shape = [1, 1, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) - - node = ng.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, - pads_begin, pads_end, dilations, parameter_input3 + parameter_input0, + parameter_input1, + parameter_input2, + strides, + pads_begin, + pads_end, + dilations, ) assert node.get_type_name() == "DeformableConvolution" @@ -277,7 +250,9 @@ def test_gather_tree(dtype): parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) - node = ng.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3) + node = ng.gather_tree( + parameter_input0, 
parameter_input1, parameter_input2, parameter_input3 + ) assert node.get_type_name() == "GatherTree" assert node.get_output_size() == 1 @@ -307,7 +282,13 @@ def test_lstm_cell_operator(dtype): expected_shape = [1, 128] node_default = ng.lstm_cell( - parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, ) assert node_default.get_type_name() == "LSTMCell" @@ -363,7 +344,13 @@ def test_lstm_cell_operator_opset1(dtype): expected_shape = [1, 128] node_default = ng_opset1.lstm_cell( - parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, ) assert node_default.get_type_name() == "LSTMCell" @@ -612,7 +599,9 @@ def test_gru_cell_operator(): expected_shape = [1, 128] - node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size) + node_default = ng.gru_cell( + parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size + ) assert node_default.get_type_name() == "GRUCell" assert node_default.get_output_size() == 1 @@ -820,8 +809,10 @@ def test_loop(): ti_inputs = [iter_cnt, data, initial_cma, one] body_const_condition = ng.constant(True, dtype=np.bool) - graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], - [curr_cma, cma_hist, body_const_condition]) + graph_body = GraphBody( + [body_timestep, body_data_in, body_prev_cma, body_const_one], + [curr_cma, cma_hist, body_const_condition], + ) ti_slice_input_desc = [ # timestep # input_idx, body_param_idx, start, stride, part_size, end, axis @@ -926,7 +917,9 @@ def test_region_yolo(): end_axis = 3 do_softmax = False - node = ng.region_yolo(data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis) + node = ng.region_yolo( + data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis + ) assert node.get_type_name() == "RegionYolo" assert node.get_output_size() == 1 @@ -996,7 +989,9 @@ def test_embedding_segments_sum_with_some_opt_inputs(): def test_embedding_bag_packed_sum(): emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) indices = ng.parameter([3, 3], name="indices", dtype=np.int64) - per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32) + per_sample_weights = ng.parameter( + [3, 3], name="per_sample_weights", dtype=np.float32 + ) # only 1 out of 3 optional inputs node = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights) @@ -1048,7 +1043,7 @@ def test_prior_box(int_dtype, fp_dtype): "offset": fp_dtype(0), "min_size": np.array([2, 3], dtype=fp_dtype), "aspect_ratio": np.array([1.5, 2.0, 2.5], dtype=fp_dtype), - "scale_all_sizes": False + "scale_all_sizes": False, } layer_shape = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype) @@ -1120,7 +1115,9 @@ def test_detection_output(int_dtype, fp_dtype): aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") aux_box_preds = ng.parameter([4, 8], fp_dtype, "aux_box_preds") - node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) + node = ng.detection_output( + box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds + ) assert node.get_type_name() == "DetectionOutput" assert node.get_output_size() == 1 @@ -1158,7 +1155,10 @@ 
def test_proposal(int_dtype, fp_dtype): assert node.get_type_name() == "Proposal" assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5] + assert list(node.get_output_shape(0)) == [ + batch_size * attributes["post_nms_topn"], + 5, + ] def test_tensor_iterator(): @@ -1193,7 +1193,10 @@ def test_tensor_iterator(): iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) ti_inputs = [iter_cnt, data, initial_cma, one] - graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist]) + graph_body = GraphBody( + [body_timestep, body_data_in, body_prev_cma, body_const_one], + [curr_cma, cma_hist], + ) ti_slice_input_desc = [ # timestep # input_idx, body_param_idx, start, stride, part_size, end, axis @@ -1551,7 +1554,7 @@ def test_gru_sequence_operator_bidirectional(dtype): activation_alpha, activation_beta, clip, - linear_before_reset + linear_before_reset, ) assert node_param.get_type_name() == "GRUSequence" @@ -1617,7 +1620,7 @@ def test_gru_sequence_operator_reverse(dtype): activation_alpha, activation_beta, clip, - linear_before_reset + linear_before_reset, ) assert node_param.get_type_name() == "GRUSequence" @@ -1683,7 +1686,7 @@ def test_gru_sequence_operator_forward(dtype): activation_alpha, activation_beta, clip, - linear_before_reset + linear_before_reset, ) assert node.get_type_name() == "GRUSequence" @@ -1873,53 +1876,3 @@ def test_rnn_sequence_operator_forward(dtype): assert node.get_type_name() == "RNNSequence" assert node.get_output_size() == 2 - - -def test_multiclass_nms(): - boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, - 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, - 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32") - boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float) - scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, - 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") - scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float) - - nms_node = ng.multiclass_nms(box, score, output_type="i32", nms_top_k=3, - iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid", - nms_eta=1.0) - - assert nms_node.get_type_name() == "MulticlassNms" - assert nms_node.get_output_size() == 3 - assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)]) - assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)]) - assert list(nms_node.outputs()[2].get_shape()) == [1, ] - assert nms_node.get_output_element_type(0) == Type.f32 - assert nms_node.get_output_element_type(1) == Type.i32 - assert nms_node.get_output_element_type(2) == Type.i32 - - -def test_matrix_nms(): - boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, - 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, - 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32") - boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float) - scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, - 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") - scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float) - - nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3, - score_threshold=0.0, sort_result_type="score", background_class=0, - decay_function="linear", gaussian_sigma=2.0, post_threshold=0.0) - - assert nms_node.get_type_name() == "MatrixNms" - 
assert nms_node.get_output_size() == 3 - assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)]) - assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)]) - assert list(nms_node.outputs()[2].get_shape()) == [1, ] - assert nms_node.get_output_element_type(0) == Type.f32 - assert nms_node.get_output_element_type(1) == Type.i32 - assert nms_node.get_output_element_type(2) == Type.i32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py index 423c519272b4ed..38f4fded429986 100644 --- a/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py @@ -26,14 +26,18 @@ def test_avg_pool_2d(_ndarray_1x1x4x4): exclude_pad = True expected = [[[[13.5, 15.5], [21.5, 23.5]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ng.avg_pool( + param, strides, pads_begin, pads_end, kernel_shape, exclude_pad + ) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) expected = [[[[13.5, 14.5, 15.5], [17.5, 18.5, 19.5], [21.5, 22.5, 23.5]]]] strides = [1, 1] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ng.avg_pool( + param, strides, pads_begin, pads_end, kernel_shape, exclude_pad + ) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) @@ -44,14 +48,18 @@ def test_avg_pool_2d(_ndarray_1x1x4x4): exclude_pad = True expected = [[[[11.0, 12.5, 14.0], [17.0, 18.5, 20.0], [23.0, 24.5, 26.0]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ng.avg_pool( + param, strides, pads_begin, pads_end, kernel_shape, exclude_pad + ) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) exclude_pad = False expected = [[[[2.75, 6.25, 3.5], [8.5, 18.5, 10.0], [5.75, 12.25, 6.5]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ng.avg_pool( + param, strides, pads_begin, pads_end, kernel_shape, exclude_pad + ) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) @@ -69,7 +77,9 @@ def test_avg_pooling_3d(_ndarray_1x1x4x4): pads_end = [0] * spatial_dim_count exclude_pad = True - avgpool = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avgpool = ng.avg_pool( + param, strides, pads_begin, pads_end, kernel_shape, exclude_pad + ) comp = rt.computation(avgpool, param) result = comp(data) result_ref = [[[[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]]]] @@ -85,35 +95,20 @@ def test_max_pool_basic(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) 
+ maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape) comp = rt.computation(maxpool_node, data_node) + result = comp(data) expected = np.array( [[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32 ) - expected_idx = np.array([[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]], dtype=np.int32) - assert np.allclose(result[0], expected) - assert np.allclose(result[1], expected_idx) + assert np.allclose(result, expected) def test_max_pool_strides(): @@ -125,33 +120,17 @@ def test_max_pool_strides(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [2, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) + maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape) comp = rt.computation(maxpool_node, data_node) result = comp(data) expected = np.array([[[[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]]]], dtype=np.float32) - expected_idx = np.array([[[[5, 6, 7], [13, 14, 15]]]], dtype=np.int32) - assert np.allclose(result[0], expected) - assert np.allclose(result[1], expected_idx) + assert np.allclose(result, expected) def test_max_pool_kernel_shape1x1(): @@ -163,31 +142,16 @@ def test_max_pool_kernel_shape1x1(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] kernel_shape = [1, 1] - rounding_type = "floor" - auto_pad = None - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) + maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape) comp = rt.computation(maxpool_node, data_node) result = comp(data) - assert np.allclose(result[0], data) - assert np.allclose(result[1], np.arange(0, 16, dtype=np.int32).reshape((1, 1, 4, 4))) + assert np.allclose(result, data) def test_max_pool_kernel_shape3x3(): @@ -199,31 +163,17 @@ def test_max_pool_kernel_shape3x3(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] kernel_shape = [3, 3] - rounding_type = "floor" - auto_pad = None - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) + maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape) comp = rt.computation(maxpool_node, data_node) result = comp(data) expected = np.array([[[[10.5, 11.5], [14.5, 15.5]]]], dtype=np.float32) - assert np.allclose(result[0], expected) + assert np.allclose(result, expected) def test_max_pool_non_zero_pads(): @@ -235,7 +185,6 @@ def test_max_pool_non_zero_pads(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [1, 1] pads_end = [1, 1] # 0 0 , 
0 , 0 , 0, 0 @@ -245,22 +194,9 @@ def test_max_pool_non_zero_pads(): # 0 [12.5, 13.5, 14.5, 15.5], 0 # 0 0 , 0 , 0 , 0, 0 kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) + maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape) comp = rt.computation(maxpool_node, data_node) result = comp(data) @@ -278,22 +214,7 @@ def test_max_pool_non_zero_pads(): ], dtype=np.float32, ) - expected_idx = np.array( - [ - [ - [ - [0, 1, 2, 3, 3], - [4, 5, 6, 7, 7], - [8, 9, 10, 11, 11], - [12, 13, 14, 15, 15], - [12, 13, 14, 15, 15], - ] - ] - ], - dtype=np.int32, - ) - assert np.allclose(result[0], expected) - assert np.allclose(result[1], expected_idx) + assert np.allclose(result, expected) def test_max_pool_same_upper_auto_pads(): @@ -305,7 +226,6 @@ def test_max_pool_same_upper_auto_pads(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] # [ 0.5, 1.5, 2.5, 3.5], 0, @@ -315,20 +235,10 @@ def test_max_pool_same_upper_auto_pads(): # 0 , 0 , 0 , 0, 0 kernel_shape = [2, 2] auto_pad = "same_upper" - rounding_type = "floor" - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, + data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad ) comp = rt.computation(maxpool_node, data_node) result = comp(data) @@ -346,21 +256,7 @@ def test_max_pool_same_upper_auto_pads(): ], dtype=np.float32, ) - expected_idx = np.array( - [ - [ - [ - [5, 6, 7, 7], - [9, 10, 11, 11], - [13, 14, 15, 15], - [13, 14, 15, 15], - ] - ] - ], - dtype=np.int32, - ) - assert np.allclose(result[0], expected) - assert np.allclose(result[1], expected_idx) + assert np.allclose(result, expected) def test_max_pool_same_lower_auto_pads(): @@ -372,7 +268,6 @@ def test_max_pool_same_lower_auto_pads(): # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) strides = [1, 1] - dilations = [1, 1] pads_begin = [0, 0] pads_end = [0, 0] # 0 0 , 0 , 0 , 0, @@ -382,20 +277,10 @@ def test_max_pool_same_lower_auto_pads(): # 0 [12.5, 13.5, 14.5, 15.5], kernel_shape = [2, 2] auto_pad = "same_lower" - rounding_type = "floor" - index_et = "i32" data_node = ng.parameter(data.shape, name="A", dtype=np.float32) maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, + data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad ) comp = rt.computation(maxpool_node, data_node) result = comp(data) @@ -413,18 +298,4 @@ def test_max_pool_same_lower_auto_pads(): ], dtype=np.float32, ) - expected_idx = np.array( - [ - [ - [ - [0, 1, 2, 3], - [4, 5, 6, 7], - [8, 9, 10, 11], - [12, 13, 14, 15], - ] - ] - ], - dtype=np.int32, - ) - assert np.allclose(result[0], expected) - assert np.allclose(result[1], expected_idx) + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py 
b/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py deleted file mode 100644 index c82654c7167f07..00000000000000 --- a/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py +++ /dev/null @@ -1,27 +0,0 @@ -import ngraph as ng -import numpy as np -from tests_compatibility.runtime import get_runtime - - -def test_random_uniform(): - runtime = get_runtime() - input_tensor = ng.constant(np.array([2, 4, 3], dtype=np.int32)) - min_val = ng.constant(np.array([-2.7], dtype=np.float32)) - max_val = ng.constant(np.array([3.5], dtype=np.float32)) - - random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val, - output_type="f32", global_seed=7461, - op_seed=1546) - computation = runtime.computation(random_uniform_node) - random_uniform_results = computation() - expected_results = np.array([[[2.8450181, -2.3457108, 2.2134445], - [-1.0436587, 0.79548645, 1.3023183], - [0.34447956, -2.0267959, 1.3989122], - [0.9607613, 1.5363653, 3.117298]], - - [[1.570041, 2.2782724, 2.3193843], - [3.3393657, 0.63299894, 0.41231918], - [3.1739233, 0.03919673, -0.2136085], - [-1.4519991, -2.277353, 2.630727]]], dtype=np.float32) - - assert np.allclose(random_uniform_results, expected_results) From 4051fdc5f19c2c988c2f75582106a0a75f0ff8e8 Mon Sep 17 00:00:00 2001 From: Ilya Znamenskiy Date: Wed, 10 Nov 2021 17:58:11 +0300 Subject: [PATCH 36/46] [GPU] OneDNN gpu submodule update to version 2.5 (#8449) * [GPU] OneDNN gpu submodule update to version 2.5 * [GPU] Updated onednn submodule and added layout optimizer fix --- inference-engine/thirdparty/clDNN/src/impls/ocl/reduce.cpp | 5 +++++ inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp | 6 ++---- thirdparty/onednn_gpu | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/inference-engine/thirdparty/clDNN/src/impls/ocl/reduce.cpp b/inference-engine/thirdparty/clDNN/src/impls/ocl/reduce.cpp index ef412f5dc345b3..e63cf6dd315e70 100644 --- a/inference-engine/thirdparty/clDNN/src/impls/ocl/reduce.cpp +++ b/inference-engine/thirdparty/clDNN/src/impls/ocl/reduce.cpp @@ -101,6 +101,11 @@ attach_reduce_impl::attach_reduce_impl() { std::make_tuple(data_types::i32, format::b_fs_yx_fsv16), std::make_tuple(data_types::i8, format::b_fs_yx_fsv16), std::make_tuple(data_types::u8, format::b_fs_yx_fsv16), + std::make_tuple(data_types::f32, format::b_fs_yx_fsv32), + std::make_tuple(data_types::f16, format::b_fs_yx_fsv32), + std::make_tuple(data_types::i32, format::b_fs_yx_fsv32), + std::make_tuple(data_types::i8, format::b_fs_yx_fsv32), + std::make_tuple(data_types::u8, format::b_fs_yx_fsv32), }); } diff --git a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp index 5b9f0fb311f275..65f6aa42112fa4 100644 --- a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp +++ b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp @@ -866,10 +866,8 @@ layout layout_optimizer::get_expected_layout(layout const& current_layout, } } else if ((_optimization_attributes.b_fs_yx_fsv16_network && convolution_b_fs_yx_fsv16_opt(input_layout, output_layout, weights_layout, prim)) && is_2d) { - if (is_dw) - expected_format = cldnn::format::b_fs_yx_fsv32; - else - expected_format = cldnn::format::b_fs_yx_fsv16; + // TODO: optimize clDNN kernels for good support of b_fs_yx_fsv32 format + expected_format = cldnn::format::b_fs_yx_fsv32; } else { expected_format = imad_case(node); } diff --git a/thirdparty/onednn_gpu b/thirdparty/onednn_gpu index 
1ab3f50381ae85..75d978369d0c5b 160000 --- a/thirdparty/onednn_gpu +++ b/thirdparty/onednn_gpu @@ -1 +1 @@ -Subproject commit 1ab3f50381ae85ab5ab0a29d5565ebe72ce77030 +Subproject commit 75d978369d0c5be04ec36c3cea2e00a14da1ec83
From 1319384730554587e2d2f3d3630d191ed44f995f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 10 Nov 2021 18:25:51 +0300 Subject: [PATCH 37/46] Install rules for static libraries case (#8384)
* Proper cmake install for static libraries case * Added an ability to skip template plugin * Added install rules for VPU / GPU * Install more libraries * Fixed absolute TBB include paths * Disable GNA * Fixed issue with linker * Some fixes * Fixed linkage issues in tests * Disabled some tests * Updated CI pipelines * Fixed Windows linkage * Fixed custom_opset test for static case * Fixed CVS-70313 * Continue on error * Fixed clang-format * Try to fix Windows linker * Fixed compilation * Disable samples * Fixed samples build with THREADING=SEQ * Fixed link error on Windows * Fixed ieFuncTests * Added static Azure CI * Revert "Fixed link error on Windows" This reverts commit 78cca36fd21cdbd639216df6cca10df7f88bce3e. * Merge static and dynamic linux pipelines * Fixed Azure ---
.ci/azure/linux.yml | 18 +++ CMakeLists.txt | 2 +- .../IEDevScriptsConfig.cmake | 9 ++ .../plugins/create_plugins_hpp.cmake | 4 - cmake/developer_package/plugins/plugins.cmake | 2 + cmake/extra_modules.cmake | 137 +++++++++--------- cmake/features.cmake | 4 +- cmake/templates/OpenVINOConfig.cmake.in | 12 ++ cmake/toolchains/ia32.linux.toolchain.cmake | 3 + docs/CMakeLists.txt | 14 +- docs/template_plugin/src/CMakeLists.txt | 12 +- inference-engine/cmake/ie_parallel.cmake | 7 +- .../ie_bridges/c/src/CMakeLists.txt | 4 + .../ie_bridges/c/tests/ie_c_api_test.cpp | 5 + .../offline_transformations/CMakeLists.txt | 6 +- .../src/openvino/test_utils/CMakeLists.txt | 6 +- inference-engine/src/CMakeLists.txt | 4 +- .../src/gna_plugin/CMakeLists.txt | 11 +- .../src/hetero_plugin/CMakeLists.txt | 2 +- .../src/inference_engine/CMakeLists.txt | 126 +++++++++------- .../src/legacy_api/CMakeLists.txt | 17 ++- .../CMakeLists.txt | 19 ++- .../src/preprocessing/CMakeLists.txt | 13 +- inference-engine/src/readers/CMakeLists.txt | 4 +- .../src/readers/ir_reader_v7/CMakeLists.txt | 4 + inference-engine/src/snippets/CMakeLists.txt | 12 +- .../src/vpu/common/CMakeLists.txt | 4 +- .../src/vpu/graph_transformer/CMakeLists.txt | 3 +- .../inference_engine/core_threading.cpp | 5 + .../ov_shared_object_test.cpp | 2 +- .../shared_object_loader_test.cpp | 2 +- .../functional/plugin/shared/CMakeLists.txt | 1 + .../behavior/ov_plugin/core_integration.hpp | 12 +- .../behavior/plugin/core_integration.hpp | 4 + .../behavior/shared_tests/CMakeLists.txt | 1 + .../functional/gna/CMakeLists.txt | 3 +- inference-engine/thirdparty/CMakeLists.txt | 2 + .../clDNN/kernel_selector/CMakeLists.txt | 1 + .../thirdparty/clDNN/runtime/CMakeLists.txt | 7 +- .../thirdparty/clDNN/src/CMakeLists.txt | 4 +- .../thirdparty/movidius/XLink/CMakeLists.txt | 10 +- .../thirdparty/movidius/mvnc/CMakeLists.txt | 8 +- ngraph/core/CMakeLists.txt | 10 +- ngraph/core/builder/CMakeLists.txt | 13 +- .../core/include/openvino/core/extension.hpp | 6 +- ngraph/core/reference/CMakeLists.txt | 13 +- ngraph/core/shape_inference/CMakeLists.txt | 13 +- .../frontend/frontend_manager/CMakeLists.txt | 4 +- ngraph/test/engines_util/CMakeLists.txt | 2 +- ngraph/test/extension.cpp | 41 +----- ngraph/test/frontend/shared/CMakeLists.txt | 2 +- ngraph/test/opset.cpp | 17 ++-
ngraph/test/runtime/CMakeLists.txt | 3 +- .../conditional_compilation/CMakeLists.txt | 5 +- openvino/itt/CMakeLists.txt | 5 +- openvino/pp/CMakeLists.txt | 5 +- openvino/util/CMakeLists.txt | 5 +- thirdparty/CMakeLists.txt | 5 + thirdparty/ittapi/CMakeLists.txt | 6 + thirdparty/ocl/CMakeLists.txt | 11 +- 60 files changed, 423 insertions(+), 269 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 16c9969065c834..e099ff08fa156c 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -21,6 +21,14 @@ resources: jobs: - job: Lin + strategy: + matrix: + Dynamic: + CMAKE_BUILD_SHARED_LIBS: 'ON' + Static: + CMAKE_BUILD_SHARED_LIBS: 'OFF' + maxParallel: 2 + # About 150% of total time timeoutInMinutes: 90 @@ -126,6 +134,9 @@ jobs: -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON + -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) + -DENABLE_GNA=$(CMAKE_BUILD_SHARED_LIBS) + -DENABLE_CLDNN=$(CMAKE_BUILD_SHARED_LIBS) -DPYTHON_EXECUTABLE=/usr/bin/python3.8 -DENABLE_WHEEL=ON -DENABLE_TESTS=ON @@ -183,10 +194,12 @@ jobs: - script: $(INSTALL_DIR)/samples/cpp/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build cpp samples' + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: $(INSTALL_DIR)/samples/c/build_samples.sh workingDirectory: $(BUILD_SAMPLES_DIR) displayName: 'Build c samples' + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: rm -fr $(BUILD_DIR) displayName: 'Clean build dir' @@ -199,6 +212,7 @@ jobs: . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py displayName: 'nGraph Python Bindings Tests' continueOnError: false + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: | export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer @@ -218,6 +232,7 @@ jobs: - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/tensorflow_tests --gtest_print_time=1 --gtest_output=xml:TEST-Tensorflow.xml displayName: 'Tensorflow Frontend UT' continueOnError: false + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') # . $(SETUPVARS) && python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --workers=16 --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml @@ -235,6 +250,7 @@ jobs: - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml displayName: 'GNA UT' continueOnError: false + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml displayName: 'VPU UT' @@ -251,6 +267,7 @@ jobs: - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml displayName: 'TEMPLATE FuncTests' continueOnError: false + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml displayName: 'CPU FuncTests' @@ -270,6 +287,7 @@ jobs: . 
$(SETUPVARS) -pyver 3.8 && python3 -m pytest --junitxml=TEST-PythonAPI.xml displayName: 'Python API Tests' continueOnError: false + condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON') - script: | . $(SETUPVARS) diff --git a/CMakeLists.txt b/CMakeLists.txt index b7b7f1fb86fa48..ef69fe8ed73730 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,8 +85,8 @@ include(cmake/test_model_zoo.cmake) add_subdirectory(thirdparty) add_subdirectory(openvino) add_subdirectory(ngraph) -add_subdirectory(runtime) add_subdirectory(inference-engine) +add_subdirectory(runtime) # for Template plugin openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph_backend interpreter_backend) diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake index 2bdb7e6123656c..5afa256f25def7 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -208,6 +208,15 @@ endif() # General flags +macro(ov_install_static_lib target comp) + if(NOT BUILD_SHARED_LIBS) + install(TARGETS ${target} EXPORT OpenVINOTargets + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${comp} + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${comp} + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${comp}) + endif() +endmacro() + set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) diff --git a/cmake/developer_package/plugins/create_plugins_hpp.cmake b/cmake/developer_package/plugins/create_plugins_hpp.cmake index c63f964660fb6b..4d73b0bca3bbc2 100644 --- a/cmake/developer_package/plugins/create_plugins_hpp.cmake +++ b/cmake/developer_package/plugins/create_plugins_hpp.cmake @@ -54,8 +54,4 @@ endforeach() set(IE_PLUGINS_MAP_DEFINITION "${IE_PLUGINS_MAP_DEFINITION} };\n") - -message("${IE_PLUGINS_DECLARATIONS}") -message("${IE_PLUGINS_MAP_DEFINITION}") - configure_file("${IE_PLUGINS_HPP_HEADER_IN}" "${IE_PLUGINS_HPP_HEADER}" @ONLY) diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index 1d9277fff2d572..dd9ea3463018c4 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -75,6 +75,8 @@ function(ie_add_plugin) target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IE_CREATE_EXTENSION=CreateExtensionShared${IE_PLUGIN_DEVICE_NAME}) endif() + # install static plugins + ov_install_static_lib(${IE_PLUGIN_NAME} core) endif() ie_add_vs_version_file(NAME ${IE_PLUGIN_NAME} diff --git a/cmake/extra_modules.cmake b/cmake/extra_modules.cmake index e9027e21f06502..8e2ba6b5cd3047 100644 --- a/cmake/extra_modules.cmake +++ b/cmake/extra_modules.cmake @@ -1,25 +1,28 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# function(ie_generate_dev_package_config) -# dummy check that OpenCV is here -find_package(OpenCV QUIET) - -set(all_dev_targets gflags ie_libraries) -foreach(component IN LISTS openvino_export_components) - export(TARGETS ${${component}} NAMESPACE IE:: - APPEND FILE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") - list(APPEND all_dev_targets ${${component}}) -endforeach() -add_custom_target(ie_dev_targets ALL DEPENDS ${all_dev_targets}) - -configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake" - INSTALL_DESTINATION share # not used - PATH_VARS "OpenVINO_SOURCE_DIR;gflags_BINARY_DIR" - 
NO_CHECK_REQUIRED_COMPONENTS_MACRO) - -configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake" - @ONLY) + # dummy check that OpenCV is here + find_package(OpenCV QUIET) + + set(all_dev_targets gflags ie_libraries) + foreach(component IN LISTS openvino_export_components) + export(TARGETS ${${component}} NAMESPACE IE:: + APPEND FILE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") + list(APPEND all_dev_targets ${${component}}) + endforeach() + add_custom_target(ie_dev_targets ALL DEPENDS ${all_dev_targets}) + + configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in" + "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake" + INSTALL_DESTINATION share # not used + PATH_VARS "OpenVINO_SOURCE_DIR;gflags_BINARY_DIR" + NO_CHECK_REQUIRED_COMPONENTS_MACRO) + + configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" + "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake" + @ONLY) endfunction() ie_generate_dev_package_config() @@ -29,60 +32,62 @@ ie_generate_dev_package_config() # function(register_extra_modules) -# post export -openvino_developer_export_targets(COMPONENT inference_engine TARGETS inference_engine) -openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph) + # post export + openvino_developer_export_targets(COMPONENT inference_engine TARGETS inference_engine) + openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph) -set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/runtime") + set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/runtime") -function(generate_fake_dev_package) - set(iedevconfig_file "${InferenceEngineDeveloperPackage_DIR}/InferenceEngineDeveloperPackageConfig.cmake") - file(REMOVE "${iedevconfig_file}") + function(generate_fake_dev_package) + set(iedevconfig_file "${InferenceEngineDeveloperPackage_DIR}/InferenceEngineDeveloperPackageConfig.cmake") + file(REMOVE "${iedevconfig_file}") - file(WRITE "${iedevconfig_file}" "\# !! AUTOGENERATED: DON'T EDIT !!\n\n") - file(APPEND "${iedevconfig_file}" "ie_deprecated_no_errors()\n") - - foreach(target IN LISTS ${openvino_export_components}) - if(target) - file(APPEND "${iedevconfig_file}" "add_library(IE::${target} ALIAS ${target})\n") - endif() - endforeach() -endfunction() + file(WRITE "${iedevconfig_file}" "\# !! 
AUTOGENERATED: DON'T EDIT !!\n\n") + file(APPEND "${iedevconfig_file}" "ie_deprecated_no_errors()\n") -generate_fake_dev_package() + foreach(target IN LISTS ${openvino_export_components}) + if(target) + file(APPEND "${iedevconfig_file}" "add_library(IE::${target} ALIAS ${target})\n") + endif() + endforeach() + endfunction() -# automatically import plugins from the 'plugins' folder -file(GLOB local_extra_modules "runtime/plugins/*") -# add template plugin -list(APPEND local_extra_modules "${OpenVINO_SOURCE_DIR}/docs/template_plugin") + generate_fake_dev_package() -# detect where IE_EXTRA_MODULES contains folders with CMakeLists.txt -# other folders are supposed to have sub-folders with CMakeLists.txt -foreach(module_path IN LISTS IE_EXTRA_MODULES) - if(EXISTS "${module_path}/CMakeLists.txt") - list(APPEND extra_modules "${module_path}") - elseif(module_path) - file(GLOB extra_modules ${extra_modules} "${module_path}/*") + # automatically import plugins from the 'plugins' folder + file(GLOB local_extra_modules "runtime/plugins/*") + # add template plugin + if(ENABLE_TEMPLATE) + list(APPEND local_extra_modules "${OpenVINO_SOURCE_DIR}/docs/template_plugin") endif() -endforeach() - -# add each extra module -foreach(module_path IN LISTS extra_modules local_extra_modules) - if(module_path) - get_filename_component(module_name "${module_path}" NAME) - set(build_module ON) - if(NOT EXISTS "${module_path}/CMakeLists.txt") # if module is built not using cmake - set(build_module OFF) - endif() - if(NOT DEFINED BUILD_${module_name}) - set(BUILD_${module_name} ${build_module} CACHE BOOL "Build ${module_name} extra module" FORCE) + + # detect where IE_EXTRA_MODULES contains folders with CMakeLists.txt + # other folders are supposed to have sub-folders with CMakeLists.txt + foreach(module_path IN LISTS IE_EXTRA_MODULES) + if(EXISTS "${module_path}/CMakeLists.txt") + list(APPEND extra_modules "${module_path}") + elseif(module_path) + file(GLOB extra_modules ${extra_modules} "${module_path}/*") endif() - if(BUILD_${module_name}) - message(STATUS "Register ${module_name} to be built in build-modules/${module_name}") - add_subdirectory("${module_path}" "build-modules/${module_name}") + endforeach() + + # add each extra module + foreach(module_path IN LISTS extra_modules local_extra_modules) + if(module_path) + get_filename_component(module_name "${module_path}" NAME) + set(build_module ON) + if(NOT EXISTS "${module_path}/CMakeLists.txt") # if module is built not using cmake + set(build_module OFF) + endif() + if(NOT DEFINED BUILD_${module_name}) + set(BUILD_${module_name} ${build_module} CACHE BOOL "Build ${module_name} extra module" FORCE) + endif() + if(BUILD_${module_name}) + message(STATUS "Register ${module_name} to be built in build-modules/${module_name}") + add_subdirectory("${module_path}" "build-modules/${module_name}") + endif() endif() - endif() -endforeach() + endforeach() endfunction() -register_extra_modules() \ No newline at end of file +register_extra_modules() diff --git a/cmake/features.cmake b/cmake/features.cmake index 846d486d922c53..d5c2303ced5964 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -90,6 +90,8 @@ ie_option (ENABLE_MULTI "Enables Multi Device Plugin" ON) ie_option (ENABLE_HETERO "Enables Hetero Device Plugin" ON) +ie_option (ENABLE_TEMPLATE "Enable template plugin" ON) + ie_dependent_option (ENABLE_VPU "vpu targeted plugins for inference engine" ON "NOT WINDOWS_PHONE;NOT WINDOWS_STORE" OFF) ie_dependent_option (ENABLE_MYRIAD "myriad targeted plugin for 
inference engine" ON "ENABLE_VPU" OFF) @@ -118,7 +120,7 @@ set(IE_EXTRA_MODULES "" CACHE STRING "Extra paths for extra modules to include i ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the Inference Engine binaries" ON "THREADING MATCHES TBB;LINUX" OFF) -ie_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF) +ie_dependent_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF "BUILD_SHARED_LIBS" OFF) ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF) diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index 818080a0ac8581..bb21d19b400d48 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -160,8 +160,20 @@ if(THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO" AND NOT TBB_FOUND) ${_tbb_dir} NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) + + set(install_tbbbind "@install_tbbbind@") + if(install_tbbbind) + set_and_check(_tbb_bind_dir "@PACKAGE_IE_TBBBIND_DIR@") + _ov_find_dependency(TBBBIND_2_5 + PATHS ${_tbb_bind_dir} + NO_CMAKE_FIND_ROOT_PATH + NO_DEFAULT_PATH) + set_target_properties(${TBBBIND_2_5_IMPORTED_TARGETS} PROPERTIES IMPORTED_GLOBAL ON) + endif() endif() +_ov_find_dependency(Threads) + if(NOT TARGET inference_engine) set(_ov_as_external_package ON) include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake") diff --git a/cmake/toolchains/ia32.linux.toolchain.cmake b/cmake/toolchains/ia32.linux.toolchain.cmake index 4526bc3ab9de81..f5c24fffb89fe2 100644 --- a/cmake/toolchains/ia32.linux.toolchain.cmake +++ b/cmake/toolchains/ia32.linux.toolchain.cmake @@ -17,5 +17,8 @@ macro(_set_if_not_defined var val) endif() endmacro() +# for ittapi +_set_if_not_defined(FORCE_32 ON) + # need libusb 32-bits version _set_if_not_defined(ENABLE_VPU OFF) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 93d1feed4050d1..b2ccee1dcdd1fc 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -23,8 +23,8 @@ if(NOT ENABLE_DOCKER) add_subdirectory(template_extension) set(all_docs_targets - ie_docs_snippets - template_extension templatePlugin templateFuncTests) + ie_docs_snippets templateFuncTests + template_extension template_ov_extension templatePlugin) foreach(target_name IN LISTS all_docs_targets) if(TARGET ${target_name}) set_target_properties(${target_name} PROPERTIES FOLDER docs) @@ -36,8 +36,14 @@ if(NOT ENABLE_DOCKER) # install - install(TARGETS templatePlugin template_extension template_ov_extension - LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL) + foreach(target templatePlugin template_extension template_ov_extension) + if(TARGET ${target}) + install(TARGETS ${target} + LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} + COMPONENT tests + EXCLUDE_FROM_ALL) + endif() + endforeach() endif() set(OMZ_DOCS_DIR "" CACHE PATH "Path to open_model_zoo documentation") diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt index a6411523780629..e22bb0a6dedfa3 100644 --- a/docs/template_plugin/src/CMakeLists.txt +++ b/docs/template_plugin/src/CMakeLists.txt @@ -40,13 +40,5 @@ set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_REL # POSSIBLE_PLUGINS ${TARGET_NAME}) # [cmake:plugin] -# ATTENTION: uncomment to install component -# install - -# set(component_name template) -# ie_cpack_add_component(${component_name} REQUIRED) - -# install(TARGETS ${TARGET_NAME} -# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} -# 
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} -# COMPONENT ${component_name}) +ov_install_static_lib(ngraph_backend ngraph) +ov_install_static_lib(interpreter_backend ngraph)
diff --git a/inference-engine/cmake/ie_parallel.cmake b/inference-engine/cmake/ie_parallel.cmake index 6c87d9b4eb940d..4f724840d377d9 100644 --- a/inference-engine/cmake/ie_parallel.cmake +++ b/inference-engine/cmake/ie_parallel.cmake @@ -91,7 +91,10 @@ function(set_ie_threading_interface_for TARGET_NAME) if(TARGET ${library}) get_target_property(include_directories ${library} INTERFACE_INCLUDE_DIRECTORIES) if(include_directories) - target_include_directories(${TARGET_NAME} SYSTEM BEFORE ${LINK_TYPE} ${include_directories}) + foreach(include_directory IN LISTS include_directories) + target_include_directories(${TARGET_NAME} SYSTEM BEFORE + ${LINK_TYPE} $<BUILD_INTERFACE:${include_directory}>) + endforeach() endif() endif() endforeach() @@ -178,6 +181,6 @@ function(set_ie_threading_interface_for TARGET_NAME) if (NOT THREADING STREQUAL "SEQ") find_package(Threads REQUIRED) - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${CMAKE_THREAD_LIBS_INIT}) + ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} Threads::Threads) endif() endfunction(set_ie_threading_interface_for)
diff --git a/inference-engine/ie_bridges/c/src/CMakeLists.txt b/inference-engine/ie_bridges/c/src/CMakeLists.txt index ecc8165ca349fa..4a60879bb52073 100644 --- a/inference-engine/ie_bridges/c/src/CMakeLists.txt +++ b/inference-engine/ie_bridges/c/src/CMakeLists.txt @@ -17,6 +17,10 @@ target_link_libraries(${TARGET_NAME} PRIVATE inference_engine) target_include_directories(${TARGET_NAME} PUBLIC $) +if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) +endif() + add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
diff --git a/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp b/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp index 0a7ce006800c5a..d422448bc69907 100644 --- a/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp +++ b/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp @@ -145,6 +145,9 @@ TEST(ie_core_get_available_devices, getAvailableDevices) { ie_core_free(&core); } +// TODO: CVS-68982 +#ifndef OPENVINO_STATIC_LIBRARY + TEST(ie_core_register_plugin, registerPlugin) { ie_core_t *core = nullptr; IE_ASSERT_OK(ie_core_create("", &core)); @@ -213,6 +216,8 @@ TEST(ie_core_unregister_plugin, unregisterPlugin) { ie_core_free(&core); } +#endif // !OPENVINO_STATIC_LIBRARY + TEST(ie_core_set_config, setConfig) { ie_core_t *core = nullptr; IE_ASSERT_OK(ie_core_create("", &core));
diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt index bc9115229e621c..72a69fb5904ad6 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt @@ -29,10 +29,10 @@ if(COMMAND ie_add_vs_version_file) FILEDESCRIPTION "Offline Transformatoins Python library") endif() -if(TARGET offline_transformations) - list(APPEND link_libraries offline_transformations) -else() +if(InferenceEngineDeveloperPackage_FOUND) list(APPEND link_libraries IE::offline_transformations) +else() + list(APPEND link_libraries offline_transformations) endif()
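The hunks above keep repeating one idiom: every include path that lands in an exported target's INTERFACE_INCLUDE_DIRECTORIES is wrapped in $<BUILD_INTERFACE:...>. The reason is that install(EXPORT) rejects absolute source-tree paths in exported interface properties; the BUILD_INTERFACE/INSTALL_INTERFACE pair records different paths for build-tree and installed consumers. A minimal sketch of the idiom, with a hypothetical target foo and placeholder paths standing in for the real libraries:

    add_library(foo STATIC foo.cpp)
    # Build-tree consumers see the source headers; installed consumers
    # see the relocatable include/ path under the install prefix instead.
    target_include_directories(foo PUBLIC
        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
        $<INSTALL_INTERFACE:include>)
    install(TARGETS foo EXPORT OpenVINOTargets ARCHIVE DESTINATION lib)
    install(EXPORT OpenVINOTargets NAMESPACE openvino:: DESTINATION cmake)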
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inference_engine") diff --git a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt index c6ae33c7951305..86dd25996b90ee 100644 --- a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt @@ -29,10 +29,10 @@ if(COMMAND ie_add_vs_version_file) FILEDESCRIPTION "Test Utils Python library") endif() -if(TARGET commonTestUtils) - list(APPEND link_libraries commonTestUtils) -else() +if(InferenceEngineDeveloperPackage_FOUND) list(APPEND link_libraries IE::commonTestUtils) +else() + list(APPEND link_libraries commonTestUtils) endif() target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt index 57863449d3d67b..b74d4b2830b4a4 100644 --- a/inference-engine/src/CMakeLists.txt +++ b/inference-engine/src/CMakeLists.txt @@ -9,8 +9,6 @@ endif() add_subdirectory(transformations) -add_subdirectory(legacy_api) - add_subdirectory(low_precision_transformations) add_subdirectory(offline_transformations) @@ -43,6 +41,8 @@ endif() add_subdirectory(inference_engine) +add_subdirectory(legacy_api) + add_subdirectory(readers) add_subdirectory(preprocessing) diff --git a/inference-engine/src/gna_plugin/CMakeLists.txt b/inference-engine/src/gna_plugin/CMakeLists.txt index a476318dc03e89..3ec2d5b089c6c3 100644 --- a/inference-engine/src/gna_plugin/CMakeLists.txt +++ b/inference-engine/src/gna_plugin/CMakeLists.txt @@ -65,10 +65,13 @@ target_compile_definitions(${TARGET_NAME}_test_static INTEGER_LOW_P USE_STATIC_IE) -target_link_libraries(${TARGET_NAME}_test_static PUBLIC inference_engine_preproc_s inference_engine_transformations libGNA::API) -target_include_directories(${TARGET_NAME}_test_static PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} - $ - PRIVATE $) +target_link_libraries(${TARGET_NAME}_test_static PUBLIC inference_engine_s inference_engine_preproc_s inference_engine_transformations libGNA::API) +target_include_directories(${TARGET_NAME}_test_static + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR} + $ + PRIVATE + $) set_target_properties(${TARGET_NAME}_test_static PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_test_static) set_target_properties(${TARGET_NAME} ${TARGET_NAME}_test_static diff --git a/inference-engine/src/hetero_plugin/CMakeLists.txt b/inference-engine/src/hetero_plugin/CMakeLists.txt index c7fbe6eecf8b65..39aa7191bd3fd0 100644 --- a/inference-engine/src/hetero_plugin/CMakeLists.txt +++ b/inference-engine/src/hetero_plugin/CMakeLists.txt @@ -16,7 +16,7 @@ ie_faster_build(${TARGET_NAME} UNITY ) -target_link_libraries(${TARGET_NAME} PRIVATE pugixml inference_engine +target_link_libraries(${TARGET_NAME} PRIVATE pugixml::static inference_engine ngraph inference_engine_transformations) ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 4dc8fe70d705bf..bff4002cbdcc20 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -6,8 +6,11 @@ set (TARGET_NAME "inference_engine") if(THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") find_package(TBBBIND_2_5 QUIET) - if (TBBBIND_2_5_FOUND) - message(STATUS "Static tbbbind_2_5 package 
was found") + if(TBBBIND_2_5_FOUND) + message(STATUS "Static tbbbind_2_5 package is found") + if(NOT BUILD_SHARED_LIBS) + set(install_tbbbind ON) + endif() endif() endif() @@ -101,10 +104,10 @@ source_group("include" FILES ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) add_library(${TARGET_NAME}_plugin_api INTERFACE) target_include_directories(${TARGET_NAME}_plugin_api INTERFACE - "${IE_MAIN_SOURCE_DIR}/src/plugin_api" $ - ${PUBLIC_HEADERS_DIR} - ${PUBLIC_HEADERS_DIR}/ie) + $ + $ + $) target_link_libraries(${TARGET_NAME}_plugin_api INTERFACE pugixml::static openvino::itt openvino::util) @@ -198,44 +201,34 @@ ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) # Static library used for unit tests which are always built -if(BUILD_SHARED_LIBS) - add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL - $ - $ - ${IE_STATIC_DEPENDENT_FILES}) - - set_ie_threading_interface_for(${TARGET_NAME}_s) - if (TBBBIND_2_5_FOUND) - target_compile_definitions(${TARGET_NAME}_s PRIVATE -DTBBBIND_2_5_AVAILABLE) - target_link_libraries(${TARGET_NAME}_s PRIVATE ${TBBBIND_2_5_IMPORTED_TARGETS}) - endif() +add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL + $ + $ + ${IE_STATIC_DEPENDENT_FILES}) - target_include_directories(${TARGET_NAME}_s PUBLIC - $ - "${CMAKE_CURRENT_SOURCE_DIR}/src" - "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src") +set_ie_threading_interface_for(${TARGET_NAME}_s) +if (TBBBIND_2_5_FOUND) + target_compile_definitions(${TARGET_NAME}_s PRIVATE -DTBBBIND_2_5_AVAILABLE) + target_link_libraries(${TARGET_NAME}_s PRIVATE ${TBBBIND_2_5_IMPORTED_TARGETS}) +endif() - if(WIN32) - set_target_properties(${TARGET_NAME}_s PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_s) - endif() +target_include_directories(${TARGET_NAME}_s PUBLIC + $ + $ + $) - target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph - frontend_manager::static inference_engine_transformations pugixml::static) +if(WIN32) + set_target_properties(${TARGET_NAME}_s PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_s) +endif() - target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE) +target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph + frontend_manager::static inference_engine_preproc_s inference_engine_transformations pugixml::static) - set_target_properties(${TARGET_NAME}_s PROPERTIES - EXCLUDE_FROM_ALL ON - INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -else() - # for static OpenVINO build we can re-use inference_engine which is already static - add_library(${TARGET_NAME}_s ALIAS ${TARGET_NAME}) +target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE) - target_include_directories(${TARGET_NAME} PUBLIC - $ - "${CMAKE_CURRENT_SOURCE_DIR}/src" - "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src") -endif() +set_target_properties(${TARGET_NAME}_s PROPERTIES + EXCLUDE_FROM_ALL ON + INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) # LTO @@ -263,6 +256,11 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$") list(APPEND PATH_VARS "IE_TBB_DIR") endif() +if(install_tbbbind) + set(IE_TBBBIND_DIR "${TBBBIND_2_5}") + list(APPEND PATH_VARS "IE_TBBBIND_DIR") +endif() + # install only downloaded TBB, system one is not installed if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND TBBROOT MATCHES ${TEMP}) ie_cpack_add_component(tbb REQUIRED) @@ -285,13 +283,26 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND TBBROOT MATCHES ${TEMP}) COMPONENT tbb) set(IE_TBB_DIR_INSTALL "3rdparty/tbb/cmake") - install(FILES "${TBB}/cmake/TBBConfig.cmake" "${TBB}/cmake/TBBConfigVersion.cmake" DESTINATION 
runtime/${IE_TBB_DIR_INSTALL} COMPONENT tbb) endif() +if(install_tbbbind) + install(DIRECTORY "${TBBBIND_2_5}/lib" + DESTINATION runtime/3rdparty/tbb_bind_2_5 + COMPONENT tbb) + install(FILES "${TBBBIND_2_5}/LICENSE" + DESTINATION runtime/3rdparty/tbb_bind_2_5 + COMPONENT tbb) + + set(IE_TBBBIND_DIR_INSTALL "3rdparty/tbb_bind_2_5/cmake") + install(FILES "${TBBBIND_2_5}/cmake/TBBBIND_2_5Config.cmake" + DESTINATION runtime/${IE_TBBBIND_DIR_INSTALL} + COMPONENT tbb) +endif() + # Install Inference Engine ie_cpack_add_component(core REQUIRED DEPENDS ${core_components}) @@ -309,28 +320,32 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets # TODO: remove later once samples are updated runtime/include/ie) -install(FILES $/plugins.xml - DESTINATION ${IE_CPACK_RUNTIME_PATH} - COMPONENT core) - -# for InferenceEngineUnitTest -if(WIN32) - install(FILES $/plugins.xml - DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) -else() +if(BUILD_SHARED_LIBS) install(FILES $/plugins.xml - DESTINATION tests/lib COMPONENT tests EXCLUDE_FROM_ALL) + DESTINATION ${IE_CPACK_RUNTIME_PATH} + COMPONENT core) + + # for InferenceEngineUnitTest + if(WIN32) + install(FILES $/plugins.xml + DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) + else() + install(FILES $/plugins.xml + DESTINATION tests/lib COMPONENT tests EXCLUDE_FROM_ALL) + endif() endif() +# Install static libraries for case BUILD_SHARED_LIBS=OFF + +ov_install_static_lib(${TARGET_NAME}_plugin_api core) + # Install cmake scripts -if(BUILD_SHARED_LIBS) - install(EXPORT OpenVINOTargets - FILE OpenVINOTargets.cmake - NAMESPACE openvino:: - DESTINATION runtime/cmake - COMPONENT core_dev) -endif() +install(EXPORT OpenVINOTargets + FILE OpenVINOTargets.cmake + NAMESPACE openvino:: + DESTINATION runtime/cmake + COMPONENT core_dev) set(IE_NGRAPH_DIR "${CMAKE_BINARY_DIR}/ngraph") set(IE_INCLUDE_DIR "${PUBLIC_HEADERS_DIR}/ie") @@ -349,6 +364,7 @@ configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOCo set(IE_INCLUDE_DIR "include/ie") set(IE_NGRAPH_DIR ".") set(IE_TBB_DIR "${IE_TBB_DIR_INSTALL}") +set(IE_TBBBIND_DIR "${IE_TBBBIND_DIR_INSTALL}") set(IE_PARALLEL_CMAKE "cmake/ie_parallel.cmake") configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig.cmake.in" diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt index 1bf44deb97cc55..d9158ef5ba139d 100644 --- a/inference-engine/src/legacy_api/CMakeLists.txt +++ b/inference-engine/src/legacy_api/CMakeLists.txt @@ -26,7 +26,7 @@ endif() file(TOUCH ${CMAKE_CURRENT_BINARY_DIR}/dummy.cpp) -add_library(${TARGET_NAME}_obj OBJECT +add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL ${LIBRARY_SRC} ${PUBLIC_HEADERS}) @@ -53,7 +53,7 @@ add_cpplint_target(${TARGET_NAME}_obj_cpplint FOR_TARGETS ${TARGET_NAME}_obj) # Create library -add_library(${TARGET_NAME} +add_library(${TARGET_NAME} EXCLUDE_FROM_ALL ${CMAKE_CURRENT_BINARY_DIR}/dummy.cpp $) @@ -64,7 +64,8 @@ target_link_libraries(${TARGET_NAME} PUBLIC inference_engine PRIVATE pugixml::static openvino::itt ngraph inference_engine_transformations) -target_include_directories(${TARGET_NAME} INTERFACE ${PUBLIC_HEADERS_DIR}) +target_include_directories(${TARGET_NAME} INTERFACE + $) ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) @@ -79,6 +80,10 @@ openvino_developer_export_targets(COMPONENT inference_engine TARGETS ${TARGET_NA # install -install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core - LIBRARY DESTINATION 
${IE_CPACK_LIBRARY_PATH} COMPONENT core) +if(BUILD_SHARED_LIBS) + install(TARGETS ${TARGET_NAME} + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) +else() + ov_install_static_lib(${TARGET_NAME} core) +endif() diff --git a/inference-engine/src/low_precision_transformations/CMakeLists.txt b/inference-engine/src/low_precision_transformations/CMakeLists.txt index 10c7162ecb7003..4cbb9c1b33ca0f 100644 --- a/inference-engine/src/low_precision_transformations/CMakeLists.txt +++ b/inference-engine/src/low_precision_transformations/CMakeLists.txt @@ -17,13 +17,11 @@ source_group("include" FILES ${PUBLIC_HEADERS}) # Create library -add_library(${TARGET_NAME} +add_library(${TARGET_NAME} EXCLUDE_FROM_ALL ${LIBRARY_SRC} ${PUBLIC_HEADERS}) -ie_faster_build(${TARGET_NAME} - UNITY -) +ie_faster_build(${TARGET_NAME} UNITY) ie_add_vs_version_file(NAME ${TARGET_NAME} FILEDESCRIPTION "Inference Engine LP transformations library") @@ -31,7 +29,8 @@ ie_add_vs_version_file(NAME ${TARGET_NAME} target_link_libraries(${TARGET_NAME} PUBLIC inference_engine_transformations PRIVATE openvino::itt) -target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR}) +target_include_directories(${TARGET_NAME} PUBLIC + $) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) @@ -47,6 +46,10 @@ openvino_developer_export_targets(COMPONENT inference_engine TARGETS ${TARGET_NA # install -install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core - LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) +if(BUILD_SHARED_LIBS) + install(TARGETS ${TARGET_NAME} + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) +else() + ov_install_static_lib(${TARGET_NAME} core) +endif() diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt index e3c52c8236d21b..29e415e9ade136 100644 --- a/inference-engine/src/preprocessing/CMakeLists.txt +++ b/inference-engine/src/preprocessing/CMakeLists.txt @@ -139,8 +139,9 @@ else() target_link_libraries(inference_engine PRIVATE ${TARGET_NAME}) endif() -target_include_directories(${TARGET_NAME} INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}" - $) +target_include_directories(${TARGET_NAME} INTERFACE + $ + $) # Workaround to avoid warnings caused with bug in the avx512intrin.h of GCC5 if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND @@ -185,5 +186,9 @@ openvino_developer_export_targets(COMPONENT inference_engine TARGETS ${TARGET_NA # install -install(TARGETS ${TARGET_NAME} - LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core) +if(BUILD_SHARED_LIBS) + install(TARGETS ${TARGET_NAME} + LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core) +else() + ov_install_static_lib(${TARGET_NAME} core) +endif() diff --git a/inference-engine/src/readers/CMakeLists.txt b/inference-engine/src/readers/CMakeLists.txt index 46028205d37932..29cd6e6c3277f2 100644 --- a/inference-engine/src/readers/CMakeLists.txt +++ b/inference-engine/src/readers/CMakeLists.txt @@ -8,9 +8,11 @@ set(TARGET_NAME inference_engine_reader_api) add_library(${TARGET_NAME} INTERFACE) target_include_directories(${TARGET_NAME} INTERFACE - "${CMAKE_CURRENT_SOURCE_DIR}/reader_api" + $ $) +ov_install_static_lib(${TARGET_NAME} core) + file(GLOB_RECURSE reader_api_hpp "${CMAKE_CURRENT_SOURCE_DIR}/reader_api/*.hpp") add_cpplint_target(${TARGET_NAME}_cpplint FOR_SOURCES ${reader_api_hpp}) diff 
--git a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt index 846b831825be81..d53fc13abc7d1e 100644 --- a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt +++ b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt @@ -55,6 +55,8 @@ endif() add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +# Install rules + # for ieFuncTests install(TARGETS ${TARGET_NAME} LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL) @@ -67,3 +69,5 @@ else() install(TARGETS ${TARGET_NAME} LIBRARY DESTINATION tests/lib COMPONENT tests EXCLUDE_FROM_ALL) endif() + +ov_install_static_lib(${TARGET_NAME} core) diff --git a/inference-engine/src/snippets/CMakeLists.txt b/inference-engine/src/snippets/CMakeLists.txt index db2c8413083c4e..334c2b8fb312a6 100644 --- a/inference-engine/src/snippets/CMakeLists.txt +++ b/inference-engine/src/snippets/CMakeLists.txt @@ -17,7 +17,7 @@ source_group("include" FILES ${PUBLIC_HEADERS}) # Create library -add_library(${TARGET_NAME} +add_library(${TARGET_NAME} EXCLUDE_FROM_ALL ${LIBRARY_SRC} ${PUBLIC_HEADERS}) @@ -53,9 +53,13 @@ openvino_developer_export_targets(COMPONENT inference_engine TARGETS ${TARGET_NA # install # TODO: uncomment once snippets are integrated into CPU plugin -# install(TARGETS ${TARGET_NAME} -# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core -# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) +# if(BUILD_SHARED_LIBS) +# install(TARGETS ${TARGET_NAME} +# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core +# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) +# else() +# ov_install_static_lib(${TARGET_NAME} core) +# endif() # TODO: remove once install commands above are commented out install(TARGETS ${TARGET_NAME} diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt index d8b55be48257e8..6b9800394cb2ad 100644 --- a/inference-engine/src/vpu/common/CMakeLists.txt +++ b/inference-engine/src/vpu/common/CMakeLists.txt @@ -33,7 +33,7 @@ function(add_common_target TARGET_NAME STATIC_IE) target_include_directories(${TARGET_NAME} PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include") + $) target_include_directories(${TARGET_NAME} SYSTEM PUBLIC $ @@ -58,6 +58,8 @@ function(add_common_target TARGET_NAME STATIC_IE) if(NOT STATIC_IE) target_link_libraries(${TARGET_NAME} PUBLIC inference_engine_legacy) endif() + + ov_install_static_lib(${TARGET_NAME} myriad) endfunction() add_common_target("vpu_common_lib" FALSE) diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt index 8fdae7418b4174..4bcf6c3295b269 100644 --- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt +++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt @@ -38,7 +38,7 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE) target_include_directories(${TARGET_NAME} PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include") + $) target_include_directories(${TARGET_NAME} PUBLIC @@ -67,6 +67,7 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE) if(NOT STATIC_IE) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME} CUSTOM_FILTERS "+runtime/explicit") openvino_developer_export_targets(COMPONENT inference_engine_vpu TARGETS ${TARGET_NAME}) + ov_install_static_lib(${TARGET_NAME} myriad) endif() set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) 
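Nearly every library CMakeLists touched by this patch converges on the same install shape: shared builds keep installing the runtime binaries directly, while static builds route through the new ov_install_static_lib() macro from IEDevScriptsConfig.cmake, which adds the archive to the OpenVINOTargets export set and is a no-op when BUILD_SHARED_LIBS=ON. A condensed sketch of that recurring pattern; the target and component names are placeholders:

    if(BUILD_SHARED_LIBS)
        install(TARGETS ${TARGET_NAME}
                RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
                LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
    else()
        # the static archive still has to reach consumers, via the export set
        ov_install_static_lib(${TARGET_NAME} core)
    endif()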
diff --git a/inference-engine/tests/functional/inference_engine/core_threading.cpp b/inference-engine/tests/functional/inference_engine/core_threading.cpp index c59e3538bbce90..5540fc933dbfbe 100644 --- a/inference-engine/tests/functional/inference_engine/core_threading.cpp +++ b/inference-engine/tests/functional/inference_engine/core_threading.cpp @@ -87,6 +87,9 @@ TEST_F(CoreThreadingTests, RegisterPlugin) { }, 4000); } +// TODO: CVS-68982 +#ifndef OPENVINO_STATIC_LIBRARY + // tested function: RegisterPlugins TEST_F(CoreThreadingTests, RegisterPlugins) { InferenceEngine::Core ie; @@ -123,6 +126,8 @@ TEST_F(CoreThreadingTests, RegisterPlugins) { }, 1000); } +#endif // !OPENVINO_STATIC_LIBRARY + // tested function: GetAvailableDevices, UnregisterPlugin // TODO: some initialization (e.g. thread/dlopen) sporadically fails during such stress-test scenario TEST_F(CoreThreadingTests, DISABLED_GetAvailableDevices) { diff --git a/inference-engine/tests/functional/inference_engine/ov_shared_object_test.cpp b/inference-engine/tests/functional/inference_engine/ov_shared_object_test.cpp index 8ae3850c0ee54a..30d7a7f0c92153 100644 --- a/inference-engine/tests/functional/inference_engine/ov_shared_object_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ov_shared_object_test.cpp @@ -14,7 +14,7 @@ using namespace std; class SharedObjectOVTests : public ::testing::Test { protected: std::string get_mock_engine_name() { - return FileUtils::makePluginLibraryName(InferenceEngine::getIELibraryPath(), + return FileUtils::makePluginLibraryName({}, std::string("mock_engine") + IE_BUILD_POSTFIX); } diff --git a/inference-engine/tests/functional/inference_engine/shared_object_loader_test.cpp b/inference-engine/tests/functional/inference_engine/shared_object_loader_test.cpp index 411d87b3036548..fb1ab852c27305 100644 --- a/inference-engine/tests/functional/inference_engine/shared_object_loader_test.cpp +++ b/inference-engine/tests/functional/inference_engine/shared_object_loader_test.cpp @@ -15,7 +15,7 @@ using namespace InferenceEngine::details; class SharedObjectLoaderTests: public ::testing::Test { protected: std::string get_mock_engine_name() { - return FileUtils::makePluginLibraryName(getIELibraryPath(), + return FileUtils::makePluginLibraryName({}, std::string("mock_engine") + IE_BUILD_POSTFIX); } diff --git a/inference-engine/tests/functional/plugin/shared/CMakeLists.txt b/inference-engine/tests/functional/plugin/shared/CMakeLists.txt index 0a904626e1e63a..a1c7d795127784 100644 --- a/inference-engine/tests/functional/plugin/shared/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/shared/CMakeLists.txt @@ -51,6 +51,7 @@ addIeTarget( lptNgraphFunctions sharedTestClasses PRIVATE + inference_engine_legacy # CVS-55376 openvino::util inference_engine_transformations DEPENDENCIES diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index 0de6b629da6142..1b1704b472c0ca 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -10,13 +10,11 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/unicode_utils.hpp" -#ifdef OPENVINO_OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT # include # define GTEST_COUT std::cerr << "[ ] [ INFO ] 
" # include # include - - #endif namespace ov { @@ -114,6 +112,9 @@ TEST_P(OVClassBasicTestP, registerExistingPluginThrows) { ASSERT_THROW(ie.register_plugin(pluginName, deviceName), ov::Exception); } +// TODO: CVS-68982 +#ifndef OPENVINO_STATIC_LIBRARY + TEST_P(OVClassBasicTestP, registerNewPluginNoThrows) { ov::runtime::Core ie = createCoreWithTemplate(); ASSERT_NO_THROW(ie.register_plugin(pluginName, "NEW_DEVICE_NAME")); @@ -150,7 +151,7 @@ TEST(OVClassBasicTest, smoke_createMockEngineConfigThrows) { #endif -#ifdef OPENVINO_OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT TEST_P(OVClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { std::string pluginXML{"mock_engine_valid.xml"}; @@ -196,7 +197,8 @@ TEST_P(OVClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { CommonTestUtils::removeFile(pluginXML); } -#endif // OPENVINO_OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#endif // !OPENVINO_STATIC_LIBRARY // // GetVersions() diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index e95d4572dbc4aa..7213021011c6a5 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -106,6 +106,9 @@ TEST(IEClassBasicTest, smoke_createDefault) { ASSERT_NO_THROW(InferenceEngine::Core ie); } +// TODO: CVS-68982 +#ifndef OPENVINO_STATIC_LIBRARY + TEST_P(IEClassBasicTestP, registerExistingPluginThrows) { InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); ASSERT_THROW(ie.RegisterPlugin(pluginName, deviceName), InferenceEngine::Exception); @@ -193,6 +196,7 @@ TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { } #endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#endif // !OPENVINO_STATIC_LIBRARY // // GetVersions() diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt index e48b259815533c..b66da71d4bdab8 100644 --- a/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt +++ b/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt @@ -18,6 +18,7 @@ target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/pl target_link_libraries(${TARGET_NAME} PUBLIC funcTestUtils + inference_engine_legacy ieTestHelpers ) diff --git a/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt b/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt index fcda30314227c1..d8a66f328305a9 100644 --- a/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt @@ -20,8 +20,7 @@ if(ENABLE_HETERO) endif() if(ENABLE_MKL_DNN) - list(APPEND DEPENDENCIES - MKLDNNPlugin) + list(APPEND DEPENDENCIES MKLDNNPlugin) endif() add_executable(${TARGET_NAME} ${TEST_SRC} ${TEST_INCLUDE}) diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt index 4aea987153459d..b2ee1c12f434a9 100644 --- a/inference-engine/thirdparty/CMakeLists.txt +++ b/inference-engine/thirdparty/CMakeLists.txt @@ -24,6 +24,7 @@ if (ENABLE_CLDNN) set(CLDNN__INCLUDE_TESTS OFF CACHE BOOL "" FORCE) endif() add_subdirectory(clDNN) + ov_install_static_lib(clDNN_lib gpu) endif() 
function(ie_add_mkldnn) @@ -53,6 +54,7 @@ function(ie_add_mkldnn) add_subdirectory(mkl-dnn EXCLUDE_FROM_ALL) add_library(mkldnn ALIAS dnnl) + ov_install_static_lib(dnnl cpu) endfunction() if(ENABLE_MKL_DNN)
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/CMakeLists.txt b/inference-engine/thirdparty/clDNN/kernel_selector/CMakeLists.txt index 6c47bcec69d0a5..eeebbe2d9c7b02 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/kernel_selector/CMakeLists.txt @@ -168,5 +168,6 @@ add_custom_command( TARGET "${CLDNN_BUILD__PROJ}" POST_BUILD COMMAND "${CMAKE_COMMAND}" -E copy_if_different ${__CLDNN_Directory__core}/cache/cache.json ${CLDNN_CACHE_PATH}/cache.json) +ov_install_static_lib("${CLDNN_BUILD__PROJ}" gpu) # ======================================================================================================
diff --git a/inference-engine/thirdparty/clDNN/runtime/CMakeLists.txt b/inference-engine/thirdparty/clDNN/runtime/CMakeLists.txt index 0367cf831ddedb..2239c99505108b 100644 --- a/inference-engine/thirdparty/clDNN/runtime/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/runtime/CMakeLists.txt @@ -69,7 +69,9 @@ target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE if(ENABLE_ONEDNN_FOR_GPU) add_dependencies("${CLDNN_BUILD__PROJ}" onednn_gpu_build) target_link_libraries("${CLDNN_BUILD__PROJ}" PUBLIC ${ONEDNN_LIBRARY}) - target_include_directories("${CLDNN_BUILD__PROJ}" PUBLIC ${ONEDNN_INCLUDE_DIRS}) + foreach(dir IN LISTS ONEDNN_INCLUDE_DIRS) + target_include_directories("${CLDNN_BUILD__PROJ}" PUBLIC $<BUILD_INTERFACE:${dir}>) + endforeach() target_compile_definitions("${CLDNN_BUILD__PROJ}" PUBLIC ENABLE_ONEDNN_FOR_GPU) endif() @@ -80,4 +82,7 @@ elseif((NOT ANDROID) AND (UNIX)) endif() target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE inference_engine) + +ov_install_static_lib("${CLDNN_BUILD__PROJ}" gpu) + # ======================================================================================================
diff --git a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt index c0b86111581102..f86a7a431d3937 100644 --- a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt @@ -129,7 +129,9 @@ target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE if(ENABLE_ONEDNN_FOR_GPU) target_link_libraries("${CLDNN_BUILD__PROJ}" PUBLIC ${ONEDNN_LIBRARY}) - target_include_directories("${CLDNN_BUILD__PROJ}" PUBLIC ${ONEDNN_INCLUDE_DIRS}) + foreach(dir IN LISTS ONEDNN_INCLUDE_DIRS) + target_include_directories("${CLDNN_BUILD__PROJ}" PUBLIC $<BUILD_INTERFACE:${dir}>) + endforeach() target_compile_definitions("${CLDNN_BUILD__PROJ}" PUBLIC ENABLE_ONEDNN_FOR_GPU) endif()
diff --git a/inference-engine/thirdparty/movidius/XLink/CMakeLists.txt b/inference-engine/thirdparty/movidius/XLink/CMakeLists.txt index bf513666b7d6da..ca5bc6a001562b 100644 --- a/inference-engine/thirdparty/movidius/XLink/CMakeLists.txt +++ b/inference-engine/thirdparty/movidius/XLink/CMakeLists.txt @@ -19,11 +19,10 @@ if(NOT WIN32) ${LIBUSB_LIBRARY}) endif() -target_include_directories(${TARGET_NAME} - PUBLIC - ${XLINK_INCLUDE} - PRIVATE - ${XLINK_PLATFORM_INCLUDE}) +target_include_directories(${TARGET_NAME} PRIVATE ${XLINK_PLATFORM_INCLUDE}) +foreach(dir IN LISTS XLINK_INCLUDE) + target_include_directories(${TARGET_NAME} PUBLIC $<BUILD_INTERFACE:${dir}>) +endforeach() target_compile_definitions(${TARGET_NAME} PRIVATE @@ -42,3 +41,4 @@ endif() set_property(TARGET ${TARGET_NAME} PROPERTY C_STANDARD 99)
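The foreach() rewrites for ONEDNN_INCLUDE_DIRS and XLINK_INCLUDE above are not cosmetic: those variables are semicolon-separated lists, and an unquoted list expanded inside a single generator expression is split into broken arguments before the expression is ever evaluated. Wrapping one directory per iteration avoids that; a sketch with an assumed THIRDPARTY_INCLUDE_DIRS list and placeholder target tgt:

    # Broken: with THIRDPARTY_INCLUDE_DIRS = "dirA;dirB" the list splits the
    # argument into "$<BUILD_INTERFACE:dirA" and "dirB>", two malformed entries.
    # target_include_directories(tgt PUBLIC $<BUILD_INTERFACE:${THIRDPARTY_INCLUDE_DIRS}>)
    foreach(dir IN LISTS THIRDPARTY_INCLUDE_DIRS)
        target_include_directories(tgt PUBLIC $<BUILD_INTERFACE:${dir}>)
    endforeach()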
openvino_developer_export_targets(COMPONENT inference_engine_vpu TARGETS ${TARGET_NAME}) +ov_install_static_lib(${TARGET_NAME} myriad) diff --git a/inference-engine/thirdparty/movidius/mvnc/CMakeLists.txt b/inference-engine/thirdparty/movidius/mvnc/CMakeLists.txt index 94d461ab911799..f62a65ea8390c6 100644 --- a/inference-engine/thirdparty/movidius/mvnc/CMakeLists.txt +++ b/inference-engine/thirdparty/movidius/mvnc/CMakeLists.txt @@ -10,15 +10,14 @@ include(${XLINK_DIR}/XLink.cmake) file(GLOB_RECURSE MVNC_SOURCES "include/*" "src/*") -set(WATCHDOG_INCLUDE - ${CMAKE_CURRENT_SOURCE_DIR}/include/watchdog/) +set(WATCHDOG_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/include/watchdog/) add_library(${TARGET_NAME} STATIC ${MVNC_SOURCES}) target_include_directories(${TARGET_NAME} PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include" - ${WATCHDOG_INCLUDE} + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> + $<BUILD_INTERFACE:${WATCHDOG_INCLUDE}> PRIVATE ${XLINK_INCLUDE} ${XLINK_PLATFORM_INCLUDE}) @@ -74,6 +73,7 @@ if(NOT WIN32) endif() openvino_developer_export_targets(COMPONENT inference_engine_vpu TARGETS ${TARGET_NAME}) +ov_install_static_lib(${TARGET_NAME} myriad) if(ENABLE_TESTS AND ENABLE_MYRIAD_MVNC_TESTS) add_subdirectory(tests) diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index f93cdfd83bd065..2d382929019159 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -77,7 +77,7 @@ if(NOT BUILD_SHARED_LIBS) endif() if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # ngraph is linked against ngraph_builders, ngraph_reference static libraries + # ngraph is linked against ngraph_builders, ngraph_reference, ov_shape_inference static libraries # which include ngraph headers with dllimport attribute. Linker complains about it # but no way to fix this: linking with no attribute defaults to dllexport and we have # multiple definitions for ngraph symbols. @@ -85,7 +85,13 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # The possible way is to use object libraries for ngraph_builders, ngraph_reference # but it's not convenient since these libraries are exported from build tree # and it's better to use them as static libraries in 3rd party projects - set_target_properties(ngraph PROPERTIES LINK_FLAGS "/IGNORE:4217,4286") + if(BUILD_SHARED_LIBS) + set(link_type PRIVATE) + else() + set(link_type PUBLIC) + endif() + + target_link_options(ngraph ${link_type} "/IGNORE:4217,4286") endif() # some sources are located in ngraph, while headers are in inference_engine_transformations diff --git a/ngraph/core/builder/CMakeLists.txt b/ngraph/core/builder/CMakeLists.txt index b5953a6f1709db..b6c2fb0c0cc45d 100644 --- a/ngraph/core/builder/CMakeLists.txt +++ b/ngraph/core/builder/CMakeLists.txt @@ -15,7 +15,7 @@ set(BUILDER_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include/) source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${PUBLIC_HEADERS}) -# Create shared library +# Create static library add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) if(COMMAND ie_faster_build) @@ -25,14 +25,21 @@ if(COMMAND ie_faster_build) ) endif() -# Defines macro in C++ to load backend plugin -target_include_directories(${TARGET_NAME} PUBLIC ${BUILDER_INCLUDE_DIR} ${NGRAPH_INCLUDE_PATH}) +target_include_directories(${TARGET_NAME} PUBLIC + $<BUILD_INTERFACE:${BUILDER_INCLUDE_DIR}> + $<BUILD_INTERFACE:${NGRAPH_INCLUDE_PATH}>) + +if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) +endif() add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) # Add an alias so that library can be used inside the build tree, e.g.
when testing add_library(ngraph::builder ALIAS ${TARGET_NAME}) +ov_install_static_lib(ngraph_builders ngraph) + # developer package openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph::builder) diff --git a/ngraph/core/include/openvino/core/extension.hpp b/ngraph/core/include/openvino/core/extension.hpp index 9e997336ac985e..6b8860387706bc 100644 --- a/ngraph/core/include/openvino/core/extension.hpp +++ b/ngraph/core/include/openvino/core/extension.hpp @@ -16,8 +16,8 @@ # define OPENVINO_EXTENSION_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS # define OPENVINO_EXTENSION_API OPENVINO_CORE_EXPORTS #else -# define OPENVINO_EXTENSION_C_API OPENVINO_EXTERN_C OPENVINO_API -# define OPENVINO_EXTENSION_API OPENVINO_API +# define OPENVINO_EXTENSION_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS +# define OPENVINO_EXTENSION_API OPENVINO_CORE_EXPORTS #endif namespace ov { @@ -27,7 +27,7 @@ class Extension; /** * @brief The class provides the base interface for OpenVINO extensions */ -class OPENVINO_API Extension : public std::enable_shared_from_this<Extension> { +class OPENVINO_API Extension { public: using Ptr = std::shared_ptr<Extension>; diff --git a/ngraph/core/reference/CMakeLists.txt b/ngraph/core/reference/CMakeLists.txt index 99c82bf32bbb93..e7b1d1bb20bb3f 100644 --- a/ngraph/core/reference/CMakeLists.txt +++ b/ngraph/core/reference/CMakeLists.txt @@ -15,7 +15,7 @@ set(REF_IMPL_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${PUBLIC_HEADERS}) -# Create shared library +# Create static library add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) if(COMMAND ie_faster_build) @@ -31,8 +31,13 @@ endif() target_compile_definitions(${TARGET_NAME} PRIVATE XBYAK_NO_OP_NAMES XBYAK64) -# Defines macro in C++ to load backend plugin -target_include_directories(${TARGET_NAME} PUBLIC ${REF_IMPL_INCLUDE_DIR} ${NGRAPH_INCLUDE_PATH}) +if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) +endif() + +target_include_directories(${TARGET_NAME} PUBLIC + $<BUILD_INTERFACE:${REF_IMPL_INCLUDE_DIR}> + $<BUILD_INTERFACE:${NGRAPH_INCLUDE_PATH}>) link_system_libraries(${TARGET_NAME} PRIVATE xbyak) @@ -41,5 +46,7 @@ add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) # Add an alias so that library can be used inside the build tree, e.g.
when testing add_library(ngraph::reference ALIAS ${TARGET_NAME}) +ov_install_static_lib(${TARGET_NAME} ngraph) + # developer package openvino_developer_export_targets(COMPONENT ngraph TARGETS ngraph::reference) diff --git a/ngraph/core/shape_inference/CMakeLists.txt b/ngraph/core/shape_inference/CMakeLists.txt index 23897095fce754..f769ae59d3ef76 100644 --- a/ngraph/core/shape_inference/CMakeLists.txt +++ b/ngraph/core/shape_inference/CMakeLists.txt @@ -15,11 +15,18 @@ set(SHAPE_INFER_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${PUBLIC_HEADERS}) -# Create shared library +# Create static library add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) -# Defines macro in C++ to load backend plugin -target_include_directories(${TARGET_NAME} PUBLIC ${SHAPE_INFER_INCLUDE_DIR} ${NGRAPH_INCLUDE_PATH}) +target_include_directories(${TARGET_NAME} PUBLIC + $<BUILD_INTERFACE:${SHAPE_INFER_INCLUDE_DIR}> + $<BUILD_INTERFACE:${NGRAPH_INCLUDE_PATH}>) + +if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) +endif() + +ov_install_static_lib(${TARGET_NAME} ngraph) # developer package openvino_developer_export_targets(COMPONENT ngraph TARGETS ${TARGET_NAME}) diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index 8ac00ebef8761a..ada2b275ea1aa6 100644 --- a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -27,9 +27,11 @@ source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) add_library(${TARGET_NAME}_static STATIC ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) add_library(${TARGET_NAME}::static ALIAS ${TARGET_NAME}_static) target_link_libraries(${TARGET_NAME}_static PRIVATE ${CMAKE_DL_LIBS} openvino::util PUBLIC ngraph) -target_include_directories(${TARGET_NAME}_static PUBLIC ${FRONTEND_INCLUDE_DIR}) +target_include_directories(${TARGET_NAME}_static PUBLIC + $<BUILD_INTERFACE:${FRONTEND_INCLUDE_DIR}>) target_include_directories(${TARGET_NAME}_static PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) target_compile_definitions(${TARGET_NAME}_static PUBLIC USE_STATIC_FRONTEND_MANAGER) +ov_install_static_lib(${TARGET_NAME}_static ngraph) # Shared library - need to recompile object files to export necessary symbols diff --git a/ngraph/test/engines_util/CMakeLists.txt b/ngraph/test/engines_util/CMakeLists.txt index 49639ae1662319..4f39565c210d23 100644 --- a/ngraph/test/engines_util/CMakeLists.txt +++ b/ngraph/test/engines_util/CMakeLists.txt @@ -11,7 +11,7 @@ if(COMMAND ie_faster_build) endif() target_include_directories(engines_test_util PRIVATE $) -target_link_libraries(engines_test_util PUBLIC ngraph ngraph_backend gtest gmock ngraph_test_util) +target_link_libraries(engines_test_util PUBLIC ngraph ngraph_backend inference_engine gtest gmock ngraph_test_util) target_include_directories(engines_test_util PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) add_clang_format_target(engines_test_util_clang FOR_SOURCES ${ENGINES_UTIL_SRC}) diff --git a/ngraph/test/extension.cpp b/ngraph/test/extension.cpp index 2fcc2be9be27ce..4dccb828a840f9 100644 --- a/ngraph/test/extension.cpp +++ b/ngraph/test/extension.cpp @@ -11,40 +11,8 @@ #include "openvino/util/file_util.hpp" #include "so_extension.hpp" -#ifdef _WIN32 -# ifndef NOMINMAX -# define NOMINMAX -# endif -# include <windows.h> -# if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP -# error "Only WINAPI_PARTITION_DESKTOP is supported, because of LoadLibrary[A|W]" -# endif -#elif defined(__linux) || defined(__APPLE__) -# include <dlfcn.h> -#endif - -static
std::string find_my_pathname() { -#ifdef _WIN32 - HMODULE hModule = GetModuleHandleW(SHARED_LIB_PREFIX L"ngraph" SHARED_LIB_SUFFIX); - WCHAR wpath[MAX_PATH]; - GetModuleFileNameW(hModule, wpath, MAX_PATH); - std::wstring ws(wpath); - std::string path(ws.begin(), ws.end()); - replace(path.begin(), path.end(), '\\', '/'); - path = ov::util::get_directory(path); - path += "/"; - return path; -#elif defined(__linux) || defined(__APPLE__) - Dl_info dl_info; - dladdr(reinterpret_cast<void*>(ov::replace_output_update_name), &dl_info); - return ov::util::get_directory(dl_info.dli_fname); -#else -# error "Unsupported OS" -#endif -} -std::string get_extension_path() { - return ov::util::make_plugin_library_name(find_my_pathname(), - std::string("template_ov_extension") + IE_BUILD_POSTFIX); +inline std::string get_extension_path() { + return ov::util::make_plugin_library_name({}, std::string("template_ov_extension") + IE_BUILD_POSTFIX); } TEST(extension, load_extension) { @@ -52,9 +20,8 @@ } TEST(extension, load_extension_and_cast) { - std::vector so_extensions; - EXPECT_NO_THROW(so_extensions = ov::detail::load_extensions(get_extension_path())); - EXPECT_EQ(1, so_extensions.size()); + std::vector so_extensions = ov::detail::load_extensions(get_extension_path()); + ASSERT_EQ(1, so_extensions.size()); std::vector extensions; std::vector> so; for (const auto& ext : so_extensions) { diff --git a/ngraph/test/frontend/shared/CMakeLists.txt b/ngraph/test/frontend/shared/CMakeLists.txt index c048c3bfb7c85c..1c7f2aa6fedef3 100644 --- a/ngraph/test/frontend/shared/CMakeLists.txt +++ b/ngraph/test/frontend/shared/CMakeLists.txt @@ -12,6 +12,6 @@ add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${LIBRARY_HEADERS}) target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../..) target_link_libraries(${TARGET_NAME} PUBLIC frontend_manager - ngraph::ngraph cnpy ie_backend ngraph_test_util engines_test_util commonTestUtils openvino::util) + ngraph cnpy commonTestUtils ie_backend ngraph_test_util engines_test_util openvino::util) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) diff --git a/ngraph/test/opset.cpp b/ngraph/test/opset.cpp index 90a106b1cbcf49..a4b16ac49aef87 100644 --- a/ngraph/test/opset.cpp +++ b/ngraph/test/opset.cpp @@ -196,17 +196,18 @@ TEST(opset, custom_opset) { ov::OpSet opset; #ifndef OPENVINO_STATIC_LIBRARY opset.insert(); - opset.insert(); - opset.insert(); #endif + opset.insert(); + opset.insert(); opset.insert(); #ifdef OPENVINO_STATIC_LIBRARY - ASSERT_EQ(opset.get_types_info().size(), 1); + EXPECT_EQ(opset.get_types_info().size(), 2); #else - ASSERT_EQ(opset.get_types_info().size(), 3); + EXPECT_EQ(opset.get_types_info().size(), 3); + EXPECT_TRUE(opset.contains_type("MyOpOld")); + // TODO: why is it not registered?
+ EXPECT_TRUE(opset.contains_type("MyOpNewFromOld")); #endif - ASSERT_TRUE(opset.contains_type("MyOpNew")); - ASSERT_TRUE(opset.contains_type("MyOpOld")); - ASSERT_TRUE(opset.contains_type("MyOpNewFromOld")); - ASSERT_FALSE(opset.contains_type("MyOpIncorrect")); + EXPECT_TRUE(opset.contains_type("MyOpNew")); + EXPECT_FALSE(opset.contains_type("MyOpIncorrect")); } diff --git a/ngraph/test/runtime/CMakeLists.txt b/ngraph/test/runtime/CMakeLists.txt index 3b562478e706bc..de6a433df1a9c1 100644 --- a/ngraph/test/runtime/CMakeLists.txt +++ b/ngraph/test/runtime/CMakeLists.txt @@ -44,7 +44,8 @@ target_link_libraries(ngraph_backend PUBLIC ngraph if (NOT WIN32) target_link_libraries(ngraph_backend PRIVATE ${CMAKE_DL_LIBS}) endif() -target_include_directories(ngraph_backend PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) +target_include_directories(ngraph_backend PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>) install(TARGETS ngraph_backend RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL diff --git a/openvino/conditional_compilation/CMakeLists.txt b/openvino/conditional_compilation/CMakeLists.txt index 6c786d401a28a7..70fb4484e7692f 100644 --- a/openvino/conditional_compilation/CMakeLists.txt +++ b/openvino/conditional_compilation/CMakeLists.txt @@ -10,7 +10,8 @@ add_library(openvino::conditional_compilation ALIAS ${TARGET_NAME}) target_link_libraries(${TARGET_NAME} INTERFACE openvino::itt) -target_include_directories(${TARGET_NAME} INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_include_directories(${TARGET_NAME} INTERFACE + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>) if(SELECTIVE_BUILD STREQUAL "COLLECT") target_compile_definitions(${TARGET_NAME} INTERFACE SELECTIVE_BUILD_ANALYZER) @@ -45,5 +46,7 @@ elseif(SELECTIVE_BUILD STREQUAL "ON") ov_force_include(${TARGET_NAME} INTERFACE ${GENERATED_HEADER}) endif() +ov_install_static_lib(${TARGET_NAME} ngraph) + file(GLOB_RECURSE hdrs ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) add_cpplint_target(${TARGET_NAME}_cpplint FOR_SOURCES ${hdrs}) diff --git a/openvino/itt/CMakeLists.txt b/openvino/itt/CMakeLists.txt index d45b9857ada43a..9be020f86cc7e0 100644 --- a/openvino/itt/CMakeLists.txt +++ b/openvino/itt/CMakeLists.txt @@ -30,6 +30,9 @@ if (CMAKE_COMPILER_IS_GNUCXX) target_compile_options(${TARGET_NAME} PRIVATE -Wall) endif() -target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_include_directories(${TARGET_NAME} PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>) + +ov_install_static_lib(${TARGET_NAME} ngraph) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) diff --git a/openvino/pp/CMakeLists.txt b/openvino/pp/CMakeLists.txt index 9583171dcbce60..e2060db6b3fae3 100644 --- a/openvino/pp/CMakeLists.txt +++ b/openvino/pp/CMakeLists.txt @@ -8,4 +8,7 @@ add_library(${TARGET_NAME} INTERFACE) add_library(openvino::pp ALIAS ${TARGET_NAME}) -target_include_directories(${TARGET_NAME} INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_include_directories(${TARGET_NAME} INTERFACE + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>) + +ov_install_static_lib(${TARGET_NAME} ngraph) diff --git a/openvino/util/CMakeLists.txt b/openvino/util/CMakeLists.txt index 54c9082810c67e..547e77783ec59e 100644 --- a/openvino/util/CMakeLists.txt +++ b/openvino/util/CMakeLists.txt @@ -33,7 +33,10 @@ add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) add_library(openvino::util ALIAS ${TARGET_NAME}) target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS}) -target_include_directories(${TARGET_NAME} PUBLIC ${UTIL_INCLUDE_DIR}) +target_include_directories(${TARGET_NAME} PUBLIC + $<BUILD_INTERFACE:${UTIL_INCLUDE_DIR}>) + 
+ov_install_static_lib(${TARGET_NAME} ngraph) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index 781e75a34ece54..9b06ee3edf8723 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -20,6 +20,7 @@ endif() add_subdirectory(xbyak EXCLUDE_FROM_ALL) openvino_developer_export_targets(COMPONENT openvino_common TARGETS xbyak) +ov_install_static_lib(xbyak openvino_common) # # Pugixml @@ -35,6 +36,7 @@ else() add_subdirectory(pugixml EXCLUDE_FROM_ALL) set_property(TARGET pugixml-static PROPERTY EXPORT_NAME pugixml) openvino_developer_export_targets(COMPONENT openvino_common TARGETS pugixml::static) + ov_install_static_lib(pugixml-static openvino_common) endfunction() ie_build_pugixml() @@ -52,6 +54,9 @@ add_subdirectory(fluid/modules/gapi EXCLUDE_FROM_ALL) set_target_properties(ade fluid PROPERTIES FOLDER thirdparty) openvino_developer_export_targets(COMPONENT openvino_common TARGETS ade fluid) +ov_install_static_lib(ade openvino_common) +ov_install_static_lib(fluid openvino_common) + # # Gflags # diff --git a/thirdparty/ittapi/CMakeLists.txt b/thirdparty/ittapi/CMakeLists.txt index c50ff92fa272b3..e595c9dddd40a5 100644 --- a/thirdparty/ittapi/CMakeLists.txt +++ b/thirdparty/ittapi/CMakeLists.txt @@ -18,6 +18,12 @@ if(ENABLE_PROFILING_ITT) target_compile_options(ittnotify PRIVATE -Wno-undef) endif() + # override INTERFACE_INCLUDE_DIRECTORIES + set_property(TARGET ittnotify PROPERTY INTERFACE_INCLUDE_DIRECTORIES + $ + $) + openvino_developer_export_targets(COMPONENT openvino_common TARGETS ittnotify) + ov_install_static_lib(ittnotify openvino_common) endif() endif() diff --git a/thirdparty/ocl/CMakeLists.txt b/thirdparty/ocl/CMakeLists.txt index a0648080ea13a6..3a2faa64624073 100644 --- a/thirdparty/ocl/CMakeLists.txt +++ b/thirdparty/ocl/CMakeLists.txt @@ -43,10 +43,17 @@ set(CMAKE_C_FLAGS "") set(CMAKE_CXX_FLAGS "") set(CMAKE_C_FLAGS_RELEASE "") set(CMAKE_CXX_FLAGS_RELEASE "") +# OpenCL has absolute paths to include directories +set(CMAKE_WARN_ON_ABSOLUTE_INSTALL_DESTINATION OFF) add_subdirectory(icd_loader) -target_include_directories(OpenCL SYSTEM PUBLIC ${OPENCL_HEADERS_DIR}) +# override INTERFACE_INCLUDE_DIRECTORIES +set_property(TARGET OpenCL PROPERTY INTERFACE_INCLUDE_DIRECTORIES + $) +foreach(dir IN LISTS OPENCL_HEADERS_DIR) + target_include_directories(OpenCL SYSTEM PUBLIC $) +endforeach() # The following varables are needed to make find_package(OpenCL) work set(OPENCL_VERSION_2_2 ON CACHE BOOL "" FORCE) @@ -58,3 +65,5 @@ get_lib_path("${OUTPUT_ROOT}/${BIN_FOLDER}" OPENCL_LIB_DIR) get_lib_name("OpenCL" OPENCL_LIB_NAME) set(OpenCL_LIBRARY "${OPENCL_LIB_DIR}/${OPENCL_LIB_NAME}" CACHE PATH "" FORCE) + +ov_install_static_lib(OpenCL gpu) From 28d83b4f458b1422176ad1fa5f895886048b699c Mon Sep 17 00:00:00 2001 From: akuporos Date: Thu, 11 Nov 2021 03:29:36 +0300 Subject: [PATCH 38/46] fix codestyle --- .../src/pyopenvino/core/infer_request.cpp | 21 ++++++++----------- .../test_infer_request.py | 8 +++---- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 31a22431db8ae7..b57d4f7569e2d6 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -96,16 +96,13 @@ void regclass_InferRequest(py::module m) { } } } - 
if (userdata != py::none()) { - if (self.user_callback_defined) { - self.userdata = userdata; - } - else { - PyErr_WarnEx(PyExc_RuntimeWarning, - "There is no callback function!", - 1); - } - } + if (userdata != py::none()) { + if (self.user_callback_defined) { + self.userdata = userdata; + } else { + PyErr_WarnEx(PyExc_RuntimeWarning, "There is no callback function!", 1); + } + } py::gil_scoped_release release; self._start_time = Time::now(); self._request.start_async(); @@ -254,8 +251,8 @@ void regclass_InferRequest(py::module m) { }); cls.def_property_readonly("userdata", [](InferRequestWrapper& self) { - return self.userdata; - }); + return self.userdata; + }); cls.def_property_readonly("inputs", [](InferRequestWrapper& self) { return self._inputs; diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 98d48b99fbcb1c..d80907aedebc36 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -154,21 +154,21 @@ def test_start_async(device): img = read_image() jobs = 3 requests = [] - for i in range(jobs): + for _ in range(jobs): requests.append(exec_net.create_infer_request()) def callback(callbacks_info): time.sleep(0.01) - callbacks_info['finished'] += 1 + callbacks_info["finished"] += 1 callbacks_info = {} - callbacks_info['finished'] = 0 + callbacks_info["finished"] = 0 for request in requests: request.set_callback(callback, callbacks_info) request.start_async({0: img}) for request in requests: request.wait() - assert callbacks_info['finished'] == jobs + assert callbacks_info["finished"] == jobs def test_infer_mixed_keys(device): From 3525a0bbc0dd634f90f2cec1b64ce3ae6db36a8c Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Thu, 11 Nov 2021 11:06:31 +0300 Subject: [PATCH 39/46] rename all methods in this class to snake_case --- .../src/pyopenvino/core/async_infer_queue.cpp | 20 +++++++++---------- .../test_infer_request.py | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 9319dd2404f67e..d521070ce40ff1 100644 --- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -29,7 +29,7 @@ class AsyncInferQueue { : _requests(requests), _idle_handles(idle_handles), _user_ids(user_ids) { - this->setDefaultCallbacks(); + this->set_default_callbacks(); _last_id = -1; } @@ -47,7 +47,7 @@ class AsyncInferQueue { return !(_idle_handles.empty()); } - size_t getIdleRequestId() { + size_t get_idle_request_id() { // Wait for any of _idle_handles py::gil_scoped_release release; std::unique_lock lock(_mutex); @@ -61,7 +61,7 @@ class AsyncInferQueue { return idle_request_id; } - void waitAll() { + void wait_all() { // Wait for all requests to return with callback thus updating // _idle_handles so it matches the size of requests py::gil_scoped_release release; @@ -71,7 +71,7 @@ class AsyncInferQueue { }); } - void setDefaultCallbacks() { + void set_default_callbacks() { for (size_t handle = 0; handle < _requests.size(); handle++) { _requests[handle]._request.set_callback([this, handle /* ... 
*/](std::exception_ptr exception_ptr) { _requests[handle]._end_time = Time::now(); @@ -83,7 +83,7 @@ class AsyncInferQueue { } } - void setCustomCallbacks(py::function f_callback) { + void set_custom_callbacks(py::function f_callback) { for (size_t handle = 0; handle < _requests.size(); handle++) { _requests[handle]._request.set_callback([this, f_callback, handle](std::exception_ptr exception_ptr) { _requests[handle]._end_time = Time::now(); @@ -145,7 +145,7 @@ void regclass_AsyncInferQueue(py::module m) { [](AsyncInferQueue& self, const py::dict inputs, py::object userdata) { // getIdleRequestId function has an intention to block InferQueue // until there is at least one idle (free to use) InferRequest - auto handle = self.getIdleRequestId(); + auto handle = self.get_idle_request_id(); // Set new inputs label/id from user self._user_ids[handle] = userdata; // Update inputs if there are any @@ -178,15 +178,15 @@ void regclass_AsyncInferQueue(py::module m) { }); cls.def("wait_all", [](AsyncInferQueue& self) { - return self.waitAll(); + return self.wait_all(); }); cls.def("get_idle_request_id", [](AsyncInferQueue& self) { - return self.getIdleRequestId(); + return self.get_idle_request_id(); }); - cls.def("set_infer_callback", [](AsyncInferQueue& self, py::function f_callback) { - self.setCustomCallbacks(f_callback); + cls.def("set_callback", [](AsyncInferQueue& self, py::function f_callback) { + self.set_custom_callbacks(f_callback); }); cls.def("__len__", [](AsyncInferQueue& self) { diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 7a9a0b0c2978aa..bc606b0a9f3133 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -179,7 +179,7 @@ def callback(request, userdata): userdata["finished"] = True img = read_image() - infer_queue.set_infer_callback(callback) + infer_queue.set_callback(callback) assert infer_queue.is_ready for i in range(jobs): infer_queue.start_async({"data": img}, {"finished": False}) From 37b048f345cb9187425a4d703c57829211257e9a Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Thu, 11 Nov 2021 13:15:44 +0300 Subject: [PATCH 40/46] some updates --- runtime/bindings/python/src/openvino/ie_api.py | 5 +++-- .../python/src/pyopenvino/core/async_infer_queue.cpp | 3 +-- .../python/tests/test_inference_engine/test_infer_request.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py index 9199ccc1a273db..4314398feac618 100644 --- a/runtime/bindings/python/src/openvino/ie_api.py +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -3,7 +3,7 @@ import numpy as np import copy -from typing import List +from typing import List, Union from openvino.pyopenvino import TBlobFloat32 from openvino.pyopenvino import TBlobFloat64 @@ -17,6 +17,7 @@ from openvino.pyopenvino import TBlobUint8 from openvino.pyopenvino import TensorDesc from openvino.pyopenvino import InferRequest +from openvino.pyopenvino import AsyncInferQueue from openvino.pyopenvino import ExecutableNetwork from openvino.pyopenvino import Tensor @@ -57,7 +58,7 @@ def infer_new_request(exec_net: ExecutableNetwork, inputs: dict = None) -> List[ return [copy.deepcopy(tensor.data) for tensor in res] # flake8: noqa: D102 -def start_async(request: InferRequest, inputs: dict = {}, userdata: 
dict = None) -> None: # type: ignore +def start_async(request: Union[InferRequest, AsyncInferQueue], inputs: dict = {}, userdata: dict = None) -> None: # type: ignore request._start_async(inputs=normalize_inputs(inputs), userdata=userdata) # flake8: noqa: C901 diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index d521070ce40ff1..0011eed0e9d313 100644 --- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -17,7 +17,6 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/core/infer_request.hpp" -#define INVALID_ID -1 namespace py = pybind11; @@ -92,7 +91,7 @@ class AsyncInferQueue { std::rethrow_exception(exception_ptr); } } catch (const std::exception& e) { - IE_THROW() << "Caught exception: " << e.what(); + throw ov::Exception(e.what()); } // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 2807259921d1fc..62e4514ee987ea 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -202,7 +202,7 @@ def callback(request, userdata): img = read_image() infer_queue.set_callback(callback) assert infer_queue.is_ready - for i in range(jobs): + for _ in range(jobs): infer_queue.start_async({"data": img}, {"finished": False}) infer_queue.wait_all() assert all([data["finished"] for data in infer_queue.userdata]) From ee4a8cff410a527f7eaa6fb95e7d1766520f83fd Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Thu, 11 Nov 2021 13:28:06 +0300 Subject: [PATCH 41/46] code style --- .../bindings/python/src/pyopenvino/core/async_infer_queue.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 0011eed0e9d313..7409f8a10848f1 100644 --- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -17,7 +17,6 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/core/infer_request.hpp" - namespace py = pybind11; class AsyncInferQueue { From 26ef61bbc4fcaf2d552382f2a6d73537276bf4c3 Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Thu, 11 Nov 2021 14:35:23 +0300 Subject: [PATCH 42/46] fix code style in tests --- .../python/tests/test_inference_engine/test_infer_request.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 62e4514ee987ea..9dfe77b9cd778d 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -205,4 +205,4 @@ def callback(request, userdata): for _ in range(jobs): infer_queue.start_async({"data": img}, {"finished": False}) infer_queue.wait_all() - assert all([data["finished"] for data in infer_queue.userdata]) + assert all(data["finished"] for data in infer_queue.userdata) From fd9f0b00a0fcae57927d81e4fcd080d1a50a4df3 Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Sat, 13 Nov 2021 13:08:07 +0300 
Subject: [PATCH 43/46] compute latency in callback --- runtime/bindings/python/src/pyopenvino/core/infer_request.cpp | 1 - runtime/bindings/python/src/pyopenvino/core/infer_request.hpp | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index b57d4f7569e2d6..35e7115bde7c1b 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -70,7 +70,6 @@ void regclass_InferRequest(py::module m) { // Call Infer function self._start_time = Time::now(); self._request.infer(); - self._end_time = Time::now(); Containers::InferResults results; for (auto& out : self._outputs) { results.push_back(self._request.get_tensor(out)); diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp index 3ea9859db1fcc8..143df8f200cb73 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp @@ -20,11 +20,15 @@ class InferRequestWrapper { InferRequestWrapper(ov::runtime::InferRequest request) : _request(request) { + // AsyncInferQueue uses this constructor - setting callback for computing a latency will be done there } InferRequestWrapper(ov::runtime::InferRequest request, const std::vector>& inputs, const std::vector>& outputs) : _request(request), _inputs(inputs), _outputs(outputs) { + _request.set_callback([this](std::exception_ptr exception_ptr) { + _end_time = Time::now(); + }); } // ~InferRequestWrapper() = default; From 0547e2193dd36ddbe61e8c234080a9c0420735fc Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Sat, 13 Nov 2021 15:34:59 +0300 Subject: [PATCH 44/46] Fix get_idle_request --- .../src/pyopenvino/core/async_infer_queue.cpp | 8 ++------ .../test_inference_engine/test_infer_request.py | 14 ++++++++------ 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 7409f8a10848f1..a8098bf55573a5 100644 --- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -28,7 +28,6 @@ class AsyncInferQueue { _idle_handles(idle_handles), _user_ids(user_ids) { this->set_default_callbacks(); - _last_id = -1; } ~AsyncInferQueue() { @@ -53,10 +52,7 @@ class AsyncInferQueue { return !(_idle_handles.empty()); }); - size_t idle_request_id = _idle_handles.front(); - _idle_handles.pop(); - - return idle_request_id; + return _idle_handles.front();; } void wait_all() { @@ -106,7 +102,6 @@ class AsyncInferQueue { std::vector _requests; std::queue _idle_handles; std::vector _user_ids; // user ID can be any Python object - size_t _last_id; std::mutex _mutex; std::condition_variable _cv; }; @@ -144,6 +139,7 @@ void regclass_AsyncInferQueue(py::module m) { // getIdleRequestId function has an intention to block InferQueue // until there is at least one idle (free to use) InferRequest auto handle = self.get_idle_request_id(); + self._idle_handles.pop(); // Set new inputs label/id from user self._user_ids[handle] = userdata; // Update inputs if there are any diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py 
index 9dfe77b9cd778d..1ea1442e74af8b 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -191,18 +191,20 @@ def test_infer_mixed_keys(device): def test_infer_queue(device): jobs = 8 + num_request = 4 core = Core() func = core.read_model(test_net_xml, test_net_bin) exec_net = core.compile_model(func, device) - infer_queue = AsyncInferQueue(exec_net, jobs) + infer_queue = AsyncInferQueue(exec_net, num_request) + jobs_done = [False for _ in range(jobs)] - def callback(request, userdata): - userdata["finished"] = True + def callback(request, job_id): + jobs_done[job_id] = True img = read_image() infer_queue.set_callback(callback) assert infer_queue.is_ready - for _ in range(jobs): - infer_queue.start_async({"data": img}, {"finished": False}) + for i in range(jobs): + infer_queue.start_async({"data": img}, i) infer_queue.wait_all() - assert all(data["finished"] for data in infer_queue.userdata) + assert all(jobs_done) From fa5bdeaa3271a6982d17bd49aaeb70e11e7613f2 Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Sat, 13 Nov 2021 17:10:31 +0300 Subject: [PATCH 45/46] fix latency --- .../python/src/pyopenvino/core/infer_request.cpp | 1 + .../tests/test_inference_engine/test_infer_request.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp index 35e7115bde7c1b..b57d4f7569e2d6 100644 --- a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -70,6 +70,7 @@ void regclass_InferRequest(py::module m) { // Call Infer function self._start_time = Time::now(); self._request.infer(); + self._end_time = Time::now(); Containers::InferResults results; for (auto& out : self._outputs) { results.push_back(self._request.get_tensor(out)); diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 1ea1442e74af8b..98075c099f2307 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -35,6 +35,7 @@ def test_get_profiling_info(device): img = read_image() request = exec_net.create_infer_request() request.infer({0: img}) + assert request.latency > 0 prof_info = request.get_profiling_info() soft_max_node = next(node for node in prof_info if node.node_name == "fc_out") assert soft_max_node.node_type == "Softmax" @@ -168,6 +169,7 @@ def callback(callbacks_info): request.start_async({0: img}) for request in requests: request.wait() + assert request.latency > 0 assert callbacks_info["finished"] == jobs @@ -196,10 +198,11 @@ def test_infer_queue(device): func = core.read_model(test_net_xml, test_net_bin) exec_net = core.compile_model(func, device) infer_queue = AsyncInferQueue(exec_net, num_request) - jobs_done = [False for _ in range(jobs)] + jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)] def callback(request, job_id): - jobs_done[job_id] = True + jobs_done[job_id]["finished"] = True + jobs_done[job_id]["latency"] = request.latency img = read_image() infer_queue.set_callback(callback) @@ -207,4 +210,5 @@ def callback(request, job_id): for i in range(jobs): infer_queue.start_async({"data": img}, i) infer_queue.wait_all() - assert 
all(jobs_done) + assert all(job["finished"] for job in jobs_done) + assert all(job["latency"] > 0 for job in jobs_done) From 6f7187b113e6ae844e2b8d025647accb63c95d9d Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Sat, 13 Nov 2021 17:12:49 +0300 Subject: [PATCH 46/46] Fix code style --- .../bindings/python/src/pyopenvino/core/async_infer_queue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index a8098bf55573a5..8c68958a1ef70a 100644 --- a/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -52,7 +52,7 @@ class AsyncInferQueue { return !(_idle_handles.empty()); }); - return _idle_handles.front();; + return _idle_handles.front(); } void wait_all() {
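Taken together, patches 39 through 46 settle the Python AsyncInferQueue API exercised by the tests in this series: snake_case methods, set_callback in place of set_infer_callback, per-job userdata threaded through start_async, and a latency value computed in the request callback. A minimal usage sketch consistent with those tests; the model path, device name, and the input name "data" are placeholders:

    import numpy as np
    from openvino import Core, AsyncInferQueue  # names as re-exported by these bindings

    jobs, num_requests = 8, 4
    done = [False] * jobs

    def callback(request, job_id):      # the userdata passed to start_async arrives here
        done[job_id] = True             # request.latency is also available at this point

    core = Core()
    func = core.read_model("model.xml")             # placeholder model
    exec_net = core.compile_model(func, "CPU")      # placeholder device
    queue = AsyncInferQueue(exec_net, num_requests)
    queue.set_callback(callback)

    img = np.zeros((1, 3, 32, 32), dtype=np.float32)  # placeholder input
    for i in range(jobs):
        queue.start_async({"data": img}, i)  # blocks until some request is idle
    queue.wait_all()
    assert all(done)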