
Commit

Merge branch 'openvinotoolkit:master' into alexeyl1/tools/update_benchmark
Alexey Lebedev authored Dec 1, 2021
2 parents d9e80ad + e9a15b7 commit af1400e
Showing 126 changed files with 1,347 additions and 80 deletions.
2 changes: 1 addition & 1 deletion .ci/azure/linux_onnxruntime.yml
@@ -98,7 +98,7 @@ jobs:
-DENABLE_CPPLINT=OFF
-DENABLE_TESTS=OFF
-DENABLE_MKL_DNN=ON
-DENABLE_CLDNN=OFF
-DENABLE_INTEL_GPU=OFF
-DENABLE_PROFILING_ITT=OFF
-DENABLE_SAMPLES=OFF
-DNGRAPH_ONNX_FRONTEND_ENABLE=ON
4 changes: 2 additions & 2 deletions .ci/azure/windows.yml
@@ -132,7 +132,7 @@ jobs:
- script: |
set PATH=$(WORK_DIR)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_ONEDNN_FOR_GPU=OFF -DENABLE_GNA=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_CLDNN=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_GAPI_PREPROCESSING=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_ONEDNN_FOR_GPU=OFF -DENABLE_GNA=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_INTEL_GPU=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_GAPI_PREPROCESSING=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
@@ -181,7 +181,7 @@ jobs:
continueOnError: false

- script: |
python -m pip install -r $(INSTALL_DIR)\tests\smoke_tests\requirements.txt
python -m pip install -r $(INSTALL_DIR)\tests\smoke_tests\requirements.txt
workingDirectory: $(INSTALL_DIR)
displayName: 'Install dependencies for samples smoke tests'
continueOnError: false
2 changes: 1 addition & 1 deletion .ci/openvino-onnx/Dockerfile
@@ -61,7 +61,7 @@ RUN cmake .. \
-DENABLE_CPPLINT=OFF \
-DENABLE_TESTS=OFF \
-DENABLE_MKL_DNN=ON \
-DENABLE_CLDNN=OFF \
-DENABLE_INTEL_GPU=OFF \
-DENABLE_PROFILING_ITT=OFF \
-DENABLE_SAMPLES=OFF \
-DENABLE_PYTHON=ON \
2 changes: 1 addition & 1 deletion CODEOWNERS
@@ -30,11 +30,11 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers

# IE GPU:
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/inference/include/ie/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/inference/include/ie/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/inference/include/openvino/runtime/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/src/plugins/intel_gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers

# IE VPU:
/inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
8 changes: 4 additions & 4 deletions cmake/coverage.cmake
@@ -65,10 +65,10 @@ if(ENABLE_MKL_DNN)
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()

if(ENABLE_CLDNN)
ie_coverage_extract(INPUT "openvino" OUTPUT "cldnn_engine"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/inference-engine/src/cldnn_engine/*")
ie_coverage_genhtml(INFO_FILE "cldnn_engine"
if (ENABLE_INTEL_GPU)
ie_coverage_extract(INPUT "openvino" OUTPUT "intel_gpu_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/intel_gpu/*")
ie_coverage_genhtml(INFO_FILE "intel_gpu_plugin"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()

7 changes: 3 additions & 4 deletions cmake/features.cmake
@@ -13,6 +13,7 @@ ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF)
ie_option (ENABLE_STRICT_DEPENDENCIES "Skip configuring \"convenient\" dependencies for efficient parallel builds" ON)

ie_dependent_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON "X86_64;NOT APPLE;NOT MINGW;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF)
ie_dependent_option (ENABLE_INTEL_GPU "GPU plugin for inference engine on Intel GPU" ON "ENABLE_CLDNN" OFF)

if (NOT ENABLE_CLDNN OR ANDROID OR
(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0))
@@ -55,8 +56,6 @@ ie_dependent_option (ENABLE_DOCS "Build docs using Doxygen" OFF "PYTHONINTERP_FO

ie_dependent_option (ENABLE_GNA "GNA support for inference engine" ON "NOT APPLE;NOT ANDROID;X86_64" OFF)

ie_dependent_option (ENABLE_CLDNN_TESTS "Enable clDNN unit tests" OFF "ENABLE_CLDNN" OFF)

# "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
if(X86 OR ARM OR (MSVC AND (ARM OR AARCH64)) )
set(THREADING_DEFAULT "SEQ")
@@ -197,8 +196,8 @@ if (ENABLE_MYRIAD_NO_BOOT AND ENABLE_MYRIAD )
add_definitions(-DENABLE_MYRIAD_NO_BOOT=1)
endif()

if (ENABLE_CLDNN)
add_definitions(-DENABLE_CLDNN=1)
if (ENABLE_INTEL_GPU)
add_definitions(-DENABLE_INTEL_GPU=1)
endif()

if (ENABLE_MKL_DNN)
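The net effect of this hunk is that configuring with ENABLE_INTEL_GPU=ON injects a compile definition into every translation unit. A minimal sketch of how such a definition is typically consumed on the C++ side — the guard name comes from the hunk above, the body is hypothetical:

#include <iostream>

int main() {
    // Set by CMake via add_definitions(-DENABLE_INTEL_GPU=1) at configure time.
#ifdef ENABLE_INTEL_GPU
    std::cout << "GPU plugin path compiled in\n";
#else
    std::cout << "GPU plugin path compiled out\n";
#endif
    return 0;
}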
2 changes: 1 addition & 1 deletion docs/IE_DG/Extensibility_DG/Extension.md
@@ -10,7 +10,7 @@ Based on that, the declaration of an extension class can look as follows:

@snippet template_extension/old/extension.hpp extension:header

The extension library should contain and export the InferenceEngine::CreateExtension method, which creates an `Extension` class:
The extension library should use the `IE_DEFINE_EXTENSION_CREATE_FUNCTION` macro to export a function that creates an `Extension` class:

@snippet template_extension/old/extension.cpp extension:CreateExtension

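For reference, a minimal sketch of what that exported entry point looks like in an extension's source file; the namespace and class name (`TemplateExtension::Extension`) and the header name are assumed for illustration, and the macro comes from ie_iextension.h:

#include <ie_iextension.h>
#include "extension.hpp"  // declares TemplateExtension::Extension (assumed)

// Expands to an exported factory function that the Inference Engine calls
// to obtain a new instance of the extension class.
IE_DEFINE_EXTENSION_CREATE_FUNCTION(TemplateExtension::Extension)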
2 changes: 1 addition & 1 deletion docs/IE_DG/Extensibility_DG/Intro.md
@@ -2,7 +2,7 @@

The Inference Engine Extensibility API enables you to add support for custom operations to the Inference Engine.
An extension should contain operation sets with custom operations and execution kernels for those operations.
Physically, an extension library can be represented as a dynamic library exporting the single `CreateExtension` function
Physically, an extension library can be represented as a dynamic library exporting the single function
that creates a new extension instance.

To load the Extensibility library to the `InferenceEngine::Core` object, use the
2 changes: 1 addition & 1 deletion docs/IE_DG/supported_plugins/GPU.md
@@ -3,7 +3,7 @@ GPU Plugin {#openvino_docs_IE_DG_supported_plugins_GPU}

The GPU plugin uses the Intel® Compute Library for Deep Neural Networks (clDNN) to infer deep neural networks.
clDNN is an open source performance library for Deep Learning (DL) applications intended for acceleration of Deep Learning Inference on Intel® Processor Graphics including Intel® HD Graphics, Intel® Iris® Graphics, Intel® Iris® Xe Graphics, and Intel® Iris® Xe MAX graphics.
For an in-depth description of clDNN, see [Inference Engine source files](https://github.com/openvinotoolkit/openvino/tree/master/inference-engine/src/cldnn_engine) and [Accelerate Deep Learning Inference with Intel® Processor Graphics](https://software.intel.com/en-us/articles/accelerating-deep-learning-inference-with-intel-processor-graphics).
For an in-depth description of clDNN, see [Inference Engine source files](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_gpu/) and [Accelerate Deep Learning Inference with Intel® Processor Graphics](https://software.intel.com/en-us/articles/accelerating-deep-learning-inference-with-intel-processor-graphics).

## Device Naming Convention
* Devices are enumerated as "GPU.X" where `X={0, 1, 2,...}`. Only Intel® GPU devices are considered.
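A small, hypothetical snippet showing how that naming convention is used with the InferenceEngine API of this release ("GPU" selects the default device, "GPU.1" the second enumerated Intel GPU; the model path is made up):

#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");
    // Target the second enumerated Intel GPU explicitly.
    auto executable = core.LoadNetwork(network, "GPU.1");
    return 0;
}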
14 changes: 12 additions & 2 deletions docs/template_plugin/src/template_plugin.cpp
@@ -152,14 +152,24 @@ InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::
// So we need to store both supported and unsupported node sets
std::unordered_set<std::string> supported;
std::unordered_set<std::string> unsupported;
auto opset = ngraph::get_opset4();
ngraph::OpSet op_super_set;
#define _OPENVINO_OP_REG(NAME, NAMESPACE) op_super_set.insert<NAMESPACE::NAME>();
#include "openvino/opsets/opset1_tbl.hpp"
#include "openvino/opsets/opset2_tbl.hpp"
#include "openvino/opsets/opset3_tbl.hpp"
#include "openvino/opsets/opset4_tbl.hpp"
#include "openvino/opsets/opset5_tbl.hpp"
#include "openvino/opsets/opset6_tbl.hpp"
#include "openvino/opsets/opset7_tbl.hpp"
#include "openvino/opsets/opset8_tbl.hpp"
#undef _OPENVINO_OP_REG
for (auto&& node : transformedFunction->get_ops()) {
// Extract transformation history from transformed node as list of nodes
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(node)) {
// Filter just nodes from original operation set
// TODO: fill with actual decision rules based on whether kernel is supported by backend
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (opset.contains_type(friendlyNameToType[fusedLayerName])) {
if (op_super_set.contains_type(friendlyNameToType[fusedLayerName])) {
supported.emplace(fusedLayerName);
} else {
unsupported.emplace(fusedLayerName);
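The opsetN_tbl.hpp headers used above are X-macro tables: each is a flat list of _OPENVINO_OP_REG(Name, Namespace) entries, and the includer decides what "registering an op" means by defining the macro before including the table. A self-contained sketch of the same technique with hypothetical names:

#include <iostream>
#include <set>
#include <string>

// The "table": one macro invocation per operation, meaning deferred.
#define MY_OP_TABLE \
    MY_OP_REG(Add) \
    MY_OP_REG(Relu) \
    MY_OP_REG(Transpose)

int main() {
    std::set<std::string> registry;
    // Give the macro a meaning, expand the table, then clean up.
#define MY_OP_REG(NAME) registry.insert(#NAME);
    MY_OP_TABLE
#undef MY_OP_REG
    std::cout << registry.size() << " ops registered\n";  // prints: 3 ops registered
    return 0;
}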
4 changes: 0 additions & 4 deletions inference-engine/src/CMakeLists.txt
@@ -13,10 +13,6 @@ if(ENABLE_MKL_DNN)
add_subdirectory(mkldnn_plugin)
endif()

if(ENABLE_CLDNN)
add_subdirectory(cldnn_engine)
endif()

if(ENABLE_VPU)
add_subdirectory(vpu)
endif()
9 changes: 9 additions & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -108,6 +108,7 @@
#include "nodes/mkldnn_fake_quantize_node.h"
#include "nodes/mkldnn_normalize_node.h"
#include "ngraph_transformations/convert_to_cpu_specific_opset.hpp"
#include "ngraph_transformations/move_eltwise_up_data_movement.hpp"
#include "transformations/smart_reshape/smart_reshape.hpp"

#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
@@ -477,6 +478,14 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function>
return node->get_rt_info().count("UNROLL_TI") == 0;
});

postLPTPassManager.register_pass<MoveEltwiseUpThroughDataMov>();
postLPTPassManager.get_pass_config()->set_callback<MoveEltwiseUpThroughDataMov>([](const std::shared_ptr<const ngraph::Node>& node) -> bool {
if (node->get_input_size() >= 2) {
return node->get_input_element_type(1) == ngraph::element::i8 || node->get_input_element_type(1) == ngraph::element::u8;
}
return false;
});

postLPTPassManager.run_passes(nGraphFunc);
}

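Note the callback semantics: inside the pass (shown in the new file below), transformation_callback(eltwise) returning true aborts the match, so the callback registered here excludes eltwise nodes whose second input is i8/u8 from being hoisted. A minimal, hypothetical harness for running the pass on its own, using the same pass-manager API as the surrounding code:

#include <ngraph/pass/manager.hpp>
#include "ngraph_transformations/move_eltwise_up_data_movement.hpp"

void RunMoveEltwiseUp(const std::shared_ptr<ngraph::Function>& model) {
    ngraph::pass::Manager manager;
    // No callback set: every matched eltwise is a candidate for hoisting.
    manager.register_pass<MKLDNNPlugin::MoveEltwiseUpThroughDataMov>();
    manager.run_passes(model);
}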
113 changes: 113 additions & 0 deletions inference-engine/src/mkldnn_plugin/ngraph_transformations/move_eltwise_up_data_movement.cpp
@@ -0,0 +1,113 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "move_eltwise_up_data_movement.hpp"

#include <memory>
#include <vector>
#include <numeric>

#include <ngraph/opsets/opset8.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>


NGRAPH_RTTI_DEFINITION(MKLDNNPlugin::MoveEltwiseUpThroughDataMov, "MoveEltwiseUpThroughDataMov", 0);

namespace {
bool is_data_movement_operation(const std::shared_ptr<ngraph::Node>& node) {
return ov::is_type<ngraph::op::v0::Squeeze>(node) ||
ov::is_type<ngraph::op::v0::Unsqueeze>(node) ||
ov::is_type<ngraph::op::v1::Reshape>(node) ||
ov::is_type<ngraph::op::v1::Transpose>(node) ||
ov::is_type<ngraph::op::v0::ShuffleChannels>(node) ||
ov::is_type<ngraph::op::v7::Roll>(node) ||
ov::is_type<ngraph::op::v0::ReverseSequence>(node) ||
ov::is_type<ngraph::op::v0::DepthToSpace>(node) ||
ov::is_type<ngraph::op::v1::BatchToSpace>(node) ||
ov::is_type<ngraph::op::v1::Broadcast>(node) ||
ov::is_type<ngraph::op::v3::Broadcast>(node) ||
ov::is_type<ngraph::op::v1::Gather>(node) ||
ov::is_type<ngraph::op::v7::Gather>(node) ||
ov::is_type<ngraph::op::v8::Gather>(node);
}

bool is_scalar_like(const std::shared_ptr<ngraph::Node>& node) {
auto constantNode = std::dynamic_pointer_cast<ngraph::opset8::Constant>(node);
return constantNode != nullptr && shape_size(constantNode->get_shape()) == 1;
}
} // namespace

MKLDNNPlugin::MoveEltwiseUpThroughDataMov::MoveEltwiseUpThroughDataMov() {
auto eltwise_pattern = ngraph::pattern::wrap_type<ngraph::op::util::UnaryElementwiseArithmetic,
ngraph::op::util::BinaryElementwiseArithmetic>(ngraph::pattern::has_static_rank());

ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();

auto eltwise = pattern_map.at(eltwise_pattern).get_node_shared_ptr();
if (transformation_callback(eltwise)) {
return false;
}

if (eltwise->get_output_size() == 0 ||
eltwise->get_input_size() == 0 ||
eltwise->get_output_element_type(0) != eltwise->get_input_element_type(0) ||
eltwise->get_output_target_inputs(0).size() != 1) {
return false;
}

bool is_binary_op = std::dynamic_pointer_cast<ngraph::op::util::BinaryElementwiseArithmetic>(eltwise) != nullptr;
if (is_binary_op && !is_scalar_like(eltwise->get_input_node_shared_ptr(1))) {
return false;
}

auto current = eltwise->get_input_node_shared_ptr(0);
auto child = eltwise;

while (is_data_movement_operation(current)) {
if (current->get_output_size() != 1 ||
current->get_output_target_inputs(0).size() != 1 ||
current->get_output_element_type(0) != current->get_input_element_type(0)) {
return false;
}

child = current;
current = current->get_input_node_shared_ptr(0);
}

// now `current` is the first op that is not a data movement op
if (child == eltwise) {
return false;
}

// eltwise constant shape should match new input shape
if (is_binary_op && current->get_output_shape(0).size() != eltwise->get_input_shape(1).size()) {
auto old_eltwise_const = std::dynamic_pointer_cast<ngraph::opset8::Constant>(eltwise->get_input_node_shared_ptr(1));
auto new_constant = std::make_shared<ngraph::opset8::Constant>(*old_eltwise_const.get(), ngraph::Shape{});
ngraph::replace_node(old_eltwise_const, new_constant);
}
ngraph::replace_output_update_name(eltwise->output(0), eltwise->input_value(0));

ngraph::OutputVector eltwiseInputs = eltwise->input_values();
eltwiseInputs[0] = child->input_value(0);
auto newEltwise = eltwise->clone_with_new_inputs(eltwiseInputs);
ngraph::copy_runtime_info(eltwise, newEltwise);
newEltwise->set_friendly_name(eltwise->get_friendly_name());

ngraph::OutputVector childInputs = child->input_values();
childInputs[0] = newEltwise;
auto newChild = child->clone_with_new_inputs(childInputs);
ngraph::copy_runtime_info(child, newChild);
newChild->set_friendly_name(child->get_friendly_name());

ngraph::replace_node(child, newChild);
return true;
};

auto m = std::make_shared<ngraph::pattern::Matcher>(eltwise_pattern, "MoveEltwiseUpThroughDataMov");
register_matcher(m, callback);
}
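To make the rewrite concrete, a hedged before/after sketch (ops and shapes chosen purely for illustration): given Parameter -> Transpose -> Sigmoid, the pass clones the eltwise onto the Transpose's input and re-clones the Transpose after it, yielding Parameter -> Sigmoid -> Transpose, so the eltwise executes on the pre-permutation layout.

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset8.hpp>

std::shared_ptr<ngraph::Function> MakeExample() {
    using namespace ngraph::opset8;
    auto param = std::make_shared<Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 8, 8});
    auto order = Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1});
    auto transpose = std::make_shared<Transpose>(param, order);  // data-movement op
    auto sigmoid = std::make_shared<Sigmoid>(transpose);         // unary eltwise
    return std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid},
                                              ngraph::ParameterVector{param});
}

// ngraph::pass::Manager m;
// m.register_pass<MKLDNNPlugin::MoveEltwiseUpThroughDataMov>();
// m.run_passes(MakeExample());  // Sigmoid now precedes Transpose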
17 changes: 17 additions & 0 deletions inference-engine/src/mkldnn_plugin/ngraph_transformations/move_eltwise_up_data_movement.hpp
@@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/pass/graph_rewrite.hpp>

namespace MKLDNNPlugin {

class MoveEltwiseUpThroughDataMov : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
MoveEltwiseUpThroughDataMov();
};

} // namespace MKLDNNPlugin