From 6b0462967fc4a5ead813c5cb39fe61392aa58179 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 22 Sep 2020 12:32:34 +0300 Subject: [PATCH 01/41] convert to doxygen comments --- docs/doxygen/ie_c_api.xml | 6 + docs/doxygen/ie_docs.xml | 5 +- docs/doxygen/ie_py_api.xml | 6 + docs/doxygen/ngraph_cpp_api.xml | 208 +++ docs/doxygen/ngraph_py_api.xml | 207 +++ ngraph/python/src/ngraph/__init__.py | 2 +- ngraph/python/src/ngraph/exceptions.py | 8 +- ngraph/python/src/ngraph/helpers.py | 4 +- ngraph/python/src/ngraph/opset1/ops.py | 1354 ++++++++--------- ngraph/python/src/ngraph/opset2/ops.py | 84 +- ngraph/python/src/ngraph/opset3/ops.py | 302 ++-- ngraph/python/src/ngraph/opset4/ops.py | 162 +- ngraph/python/src/ngraph/opset_utils.py | 2 +- ngraph/python/src/ngraph/utils/__init__.py | 2 +- .../python/src/ngraph/utils/broadcasting.py | 10 +- ngraph/python/src/ngraph/utils/decorators.py | 6 +- .../src/ngraph/utils/input_validation.py | 58 +- .../python/src/ngraph/utils/node_factory.py | 52 +- ngraph/python/src/ngraph/utils/reduction.py | 8 +- .../src/ngraph/utils/tensor_iterator_types.py | 32 +- ngraph/python/src/ngraph/utils/types.py | 18 +- 21 files changed, 1483 insertions(+), 1053 deletions(-) create mode 100644 docs/doxygen/ngraph_cpp_api.xml create mode 100644 docs/doxygen/ngraph_py_api.xml diff --git a/docs/doxygen/ie_c_api.xml b/docs/doxygen/ie_c_api.xml index 4c23367111f18e..1e650bfb0db6f8 100644 --- a/docs/doxygen/ie_c_api.xml +++ b/docs/doxygen/ie_c_api.xml @@ -29,6 +29,12 @@ + + + + + + diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index cb6afcf1801947..aa746fea5e0738 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -813,7 +813,10 @@ - + + + + diff --git a/docs/doxygen/ie_py_api.xml b/docs/doxygen/ie_py_api.xml index 35254f6edb5b15..24655dfc0b9155 100644 --- a/docs/doxygen/ie_py_api.xml +++ b/docs/doxygen/ie_py_api.xml @@ -27,6 +27,12 @@ + + + + + + diff --git a/docs/doxygen/ngraph_cpp_api.xml b/docs/doxygen/ngraph_cpp_api.xml new file mode 100644 index 00000000000000..018e5ea6de86cc --- /dev/null +++ b/docs/doxygen/ngraph_cpp_api.xml @@ -0,0 +1,208 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/doxygen/ngraph_py_api.xml b/docs/doxygen/ngraph_py_api.xml new file mode 100644 index 00000000000000..39da4397dd6b6d --- /dev/null +++ b/docs/doxygen/ngraph_py_api.xml @@ -0,0 +1,207 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ngraph/python/src/ngraph/__init__.py b/ngraph/python/src/ngraph/__init__.py index bbbe3331630bd1..b664ad404307d6 100644 --- a/ngraph/python/src/ngraph/__init__.py +++ b/ngraph/python/src/ngraph/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # 
limitations under the License. # ****************************************************************************** -"""ngraph module namespace, exposing factory functions for all ops and other classes.""" +"""! ngraph module namespace, exposing factory functions for all ops and other classes.""" # noqa: F401 from pkg_resources import get_distribution, DistributionNotFound diff --git a/ngraph/python/src/ngraph/exceptions.py b/ngraph/python/src/ngraph/exceptions.py index 4bfceb26926b02..43348e3b2c545b 100644 --- a/ngraph/python/src/ngraph/exceptions.py +++ b/ngraph/python/src/ngraph/exceptions.py @@ -13,16 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** -"""ngraph exceptions hierarchy. All exceptions are descendants of NgraphError.""" +"""! ngraph exceptions hierarchy. All exceptions are descendants of NgraphError.""" class NgraphError(Exception): - """Base class for Ngraph exceptions.""" + """! Base class for Ngraph exceptions.""" class UserInputError(NgraphError): - """User provided unexpected input.""" + """! User provided unexpected input.""" class NgraphTypeError(NgraphError, TypeError): - """Type mismatch error.""" + """! Type mismatch error.""" diff --git a/ngraph/python/src/ngraph/helpers.py b/ngraph/python/src/ngraph/helpers.py index 226b4ddeb72b79..d527eee7c7536f 100644 --- a/ngraph/python/src/ngraph/helpers.py +++ b/ngraph/python/src/ngraph/helpers.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** -"""nGraph helper functions.""" +"""! nGraph helper functions.""" from ngraph.impl import Function from openvino.inference_engine import IENetwork def function_from_cnn(cnn_network: IENetwork) -> Function: - """Get nGraph function from Inference Engine CNN network.""" + """! Get nGraph function from Inference Engine CNN network.""" capsule = cnn_network._get_function_capsule() ng_function = Function.from_capsule(capsule) return ng_function diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/ngraph/python/src/ngraph/opset1/ops.py index e848bc8a50abc7..fa47680c951b7e 100644 --- a/ngraph/python/src/ngraph/opset1/ops.py +++ b/ngraph/python/src/ngraph/opset1/ops.py @@ -14,7 +14,7 @@ # limitations under the License. # ****************************************************************************** -"""Factory functions for all ngraph ops.""" +"""! Factory functions for all ngraph ops.""" from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np @@ -60,22 +60,22 @@ @unary_op def absolute(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies f(x) = abs(x) to the input node element-wise. + """! Return node which applies f(x) = abs(x) to the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with Abs operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with Abs operation applied on it. """ return _get_node_factory_opset1().create("Abs", [node]) @unary_op def acos(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse cosine function on the input node element-wise. + """! Apply inverse cosine function on the input node element-wise. 
- :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccos operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arccos operation applied on it. """ return _get_node_factory_opset1().create("Acos", [node]) @@ -87,7 +87,7 @@ def add( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies f(x) = A+B to the input nodes element-wise.""" + """! Return node which applies f(x) = A+B to the input nodes element-wise.""" return _get_node_factory_opset1().create( "Add", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} ) @@ -95,22 +95,22 @@ def add( @unary_op def asin(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse sine function on the input node element-wise. + """! Apply inverse sine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsin operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arcsin operation applied on it. """ return _get_node_factory_opset1().create("Asin", [node]) @unary_op def atan(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse tangent function on the input node element-wise. + """! Apply inverse tangent function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctan operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arctan operation applied on it. """ return _get_node_factory_opset1().create("Atan", [node]) @@ -127,21 +127,21 @@ def avg_pool( auto_pad: Optional[str] = None, name: Optional[str] = None, ) -> Node: - """Return average pooling node. + """! Return average pooling node. - :param data_batch: The input node providing data. - :param strides: The window movement strides. - :param pads_begin: The input data optional padding below filled with zeros. - :param pads_end: The input data optional padding below filled with zeros. - :param kernel_shape: The pooling window shape. - :param exclude_pad: Whether or not to include zero padding in average computations. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable + @param data_batch: The input node providing data. + @param strides: The window movement strides. + @param pads_begin: The input data optional padding below filled with zeros. + @param pads_end: The input data optional padding below filled with zeros. + @param kernel_shape: The pooling window shape. + @param exclude_pad: Whether or not to include zero padding in average computations. + @param rounding_type: Determines used rounding schema when computing output shape. Acceptable values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: + @param auto_pad: Determines how the padding is calculated. Acceptable values: [None, 'same_upper', 'same_lower', 'valid'] - :param name: Optional name for the new output node. + @param name: Optional name for the new output node. - :return: New node with AvgPool operation applied on its data. 
+ @return New node with AvgPool operation applied on its data. """ if auto_pad is None: auto_pad = "explicit" @@ -170,17 +170,17 @@ def batch_norm_inference( epsilon: float, name: Optional[str] = None, ) -> Node: - """Perform layer normalizes a input tensor by mean and variance with appling scale and offset. + """! Perform layer normalizes a input tensor by mean and variance with appling scale and offset. - :param data: The input tensor with data for normalization. - :param gamma: The scalar scaling for normalized value. - :param beta: The bias added to the scaled normalized value. - :param mean: The value for mean normalization. - :param variance: The value for variance normalization. - :param epsilon: The number to be added to the variance to avoid division + @param data: The input tensor with data for normalization. + @param gamma: The scalar scaling for normalized value. + @param beta: The bias added to the scaled normalized value. + @param mean: The value for mean normalization. + @param variance: The value for variance normalization. + @param epsilon: The number to be added to the variance to avoid division by zero when normalizing a value. - :param name: The optional name of the output node. - :return: The new node which performs BatchNormInference. + @param name: The optional name of the output node. + @return The new node which performs BatchNormInference. """ inputs = as_nodes(gamma, beta, data, mean, variance) return _get_node_factory_opset1().create("BatchNormInference", inputs, {"epsilon": epsilon}) @@ -199,19 +199,19 @@ def binary_convolution( auto_pad: str = "EXPLICIT", name: Optional[str] = None, ) -> Node: - """Create node performing convolution with binary weights, binary input and integer output. + """! Create node performing convolution with binary weights, binary input and integer output. - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The kernel window movement strides. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted. - :param pad_value: Floating-point value used to fill pad area. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param name: The optional new name for output node. - :return: New node performing binary convolution operation. + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The kernel window movement strides. + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. + @param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted. + @param pad_value: Floating-point value used to fill pad area. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param name: The optional new name for output node. + @return New node performing binary convolution operation. 
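By way of illustration, a minimal usage sketch of one of the factory functions documented above (the `import ngraph as ng` alias, the shapes and the constant values are assumptions for illustration only, not part of this change):

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng  # assumed top-level alias for the ngraph Python API

# Illustrative NCHW input with 3 channels; all statistics are per-channel.
data = ng.constant(np.random.uniform(0, 1, (1, 3, 4, 4)).astype(np.float32))
gamma = ng.constant(np.ones(3, dtype=np.float32))   # per-channel scale
beta = ng.constant(np.zeros(3, dtype=np.float32))   # per-channel shift
mean = ng.constant(np.zeros(3, dtype=np.float32))
variance = ng.constant(np.ones(3, dtype=np.float32))

# Signature as documented above: data, gamma, beta, mean, variance, epsilon.
bn = ng.batch_norm_inference(data, gamma, beta, mean, variance, epsilon=1e-5)
~~~~~~~~~~~~~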
""" return _get_node_factory_opset1().create( "BinaryConvolution", @@ -236,16 +236,16 @@ def broadcast( mode: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. + """! Create a node which broadcasts the input node's values along specified axes to a desired shape. - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result + @param data: The node with input tensor data. + @param target_shape: The node with a new shape we want to broadcast tensor to. + @param axes_mapping: The node with a axis positions (0-based) in the result that are being broadcast. - :param mode: The type of broadcasting that specifies mapping of input tensor axes + @param mode: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: NUMPY, EXPLICIT. - :param name: Optional new name for output node. - :return: New node with broadcast shape. + @param name: Optional new name for output node. + @return New node with broadcast shape. """ inputs = as_nodes(data, target_shape) if mode.upper() == "EXPLICIT": @@ -262,13 +262,13 @@ def ctc_greedy_decoder( merge_repeated: bool = True, name: Optional[str] = None, ) -> Node: - """Perform greedy decoding on the logits given in input (best path). + """! Perform greedy decoding on the logits given in input (best path). - :param data: Logits on which greedy decoding is performed. - :param sequence_mask: The tensor with sequence masks for each sequence in the batch. - :param merge_repeated: The flag for merging repeated labels during the CTC calculation. - :param name: Optional name for output node. - :return: The new node performing an CTCGreedyDecoder operation on input tensor. + @param data: Logits on which greedy decoding is performed. + @param sequence_mask: The tensor with sequence masks for each sequence in the batch. + @param merge_repeated: The flag for merging repeated labels during the CTC calculation. + @param name: Optional name for output node. + @return The new node performing an CTCGreedyDecoder operation on input tensor. """ node_inputs = as_nodes(data, sequence_mask) return _get_node_factory_opset1().create( @@ -278,11 +278,11 @@ def ctc_greedy_decoder( @unary_op def ceiling(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies ceiling to the input node element-wise. + """! Return node which applies ceiling to the input node element-wise. - :param node: The node providing data to ceiling operation. - :param name: Optional name for output node. - :return: The node performing element-wise ceiling. + @param node: The node providing data to ceiling operation. + @param name: Optional name for output node. + @return The node performing element-wise ceiling. """ return _get_node_factory_opset1().create("Ceiling", [node]) @@ -291,29 +291,29 @@ def ceiling(node: NodeInput, name: Optional[str] = None) -> Node: def clamp( data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None ) -> Node: - """Perform clamp element-wise on data from input node. + """! Perform clamp element-wise on data from input node. Performs a clipping operation on an input value between a pair of boundary values. - For each element in :code:`data`, if the element's value is lower than :code:`min_value`, - it will be replaced with :code:`min_value`. 
If the value is higher than :code:`max_value`, - it will be replaced by :code:`max_value`. - Intermediate values of :code:`data` are returned without change. + For each element in `data`, if the element's value is lower than `min_value`, + it will be replaced with `min_value`. If the value is higher than `max_value`, + it will be replaced by `max_value`. + Intermediate values of `data` are returned without change. Clamp uses the following logic: - .. code-block:: python - + ~~~~~~~~~~~~~~~~~~~~~~~~{.py} if data < min_value: data=min_value elif data > max_value: data=max_value + ~~~~~~~~~~~~~~~~~~~~~~~~ - :param data: Input tensor. One of: input node, array or scalar. - :param min_value: The lower bound of the range. Scalar value. - :param max_value: The upper bound of the range. Scalar value. - :param name: Optional output node name. - :return: The new node performing a clamp operation on its input data element-wise. + @param data: Input tensor. One of: input node, array or scalar. + @param min_value: The lower bound of the range. Scalar value. + @param max_value: The upper bound of the range. Scalar value. + @param name: Optional output node name. + @return The new node performing a clamp operation on its input data element-wise. """ return _get_node_factory_opset1().create( "Clamp", [as_node(data)], {"min": min_value, "max": max_value} @@ -322,24 +322,24 @@ def clamp( @nameable_op def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Node: - """Concatenate input nodes into single new node along specified axis. + """! Concatenate input nodes into single new node along specified axis. - :param nodes: The nodes we want concatenate into single new node. - :param axis: The axis along which we want to concatenate input nodes. - :param name: The optional new name for output node. - :return: Return new node that is a concatenation of input nodes. + @param nodes: The nodes we want concatenate into single new node. + @param axis: The axis along which we want to concatenate input nodes. + @param name: The optional new name for output node. + @return Return new node that is a concatenation of input nodes. """ return _get_node_factory_opset1().create("Concat", as_nodes(*nodes), {"axis": axis}) @nameable_op def constant(value: NumericData, dtype: NumericType = None, name: Optional[str] = None) -> Constant: - """Create a Constant node from provided value. + """! Create a Constant node from provided value. - :param value: One of: array of values or scalar to initialize node with. - :param dtype: The data type of provided data. - :param name: Optional name for output node. - :return: The Constant node initialized with provided data. + @param value: One of: array of values or scalar to initialize node with. + @param dtype: The data type of provided data. + @param name: Optional name for output node. + @return The Constant node initialized with provided data. """ return make_constant_node(value, dtype) @@ -348,12 +348,12 @@ def constant(value: NumericData, dtype: NumericType = None, name: Optional[str] def convert( data: NodeInput, destination_type: Union[str, NumericType], name: Optional[str] = None ) -> Node: - """Return node which casts input node values to specified type. + """! Return node which casts input node values to specified type. - :param data: Node which produces the input tensor. - :param destination_type: Provides the target type for the conversion. - :param name: Optional name for the output node. - :return: New node performing the conversion operation. 
+ @param data: Node which produces the input tensor. + @param destination_type: Provides the target type for the conversion. + @param name: Optional name for the output node. + @return New node performing the conversion operation. """ if not isinstance(destination_type, str): destination_type = get_element_type_str(destination_type) @@ -364,12 +364,12 @@ def convert( @binary_op def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -> Node: - """Return node which casts data node values to the type of another node. + """! Return node which casts data node values to the type of another node. - :param data: Node which produces the input tensor - :param like: Node which provides the target type information for the conversion - :param name: Optional name for the output node. - :return: New node performing the conversion operation. + @param data: Node which produces the input tensor + @param like: Node which provides the target type information for the conversion + @param name: Optional name for the output node. + @return New node performing the conversion operation. """ return _get_node_factory_opset1().create("ConvertLike", [data, like]) @@ -385,17 +385,17 @@ def convolution( auto_pad: str = "EXPLICIT", name: Optional[str] = None, ) -> Node: - """Return node performing batched convolution operation. + """! Return node performing batched convolution operation. - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The kernel window movement strides. - :param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate. - :param pads_end: The number of zero padding elements to add on each axis above max coordinate - :param dilations: The data batch dilation strides. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param name: The optional new name for output node. - :return: New node performing batched convolution operation. + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The kernel window movement strides. + @param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate. + @param pads_end: The number of zero padding elements to add on each axis above max coordinate + @param dilations: The data batch dilation strides. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param name: The optional new name for output node. + @return New node performing batched convolution operation. """ return _get_node_factory_opset1().create( "Convolution", @@ -423,20 +423,20 @@ def convolution_backprop_data( output_padding: Optional[List[int]] = None, name: Optional[str] = None, ) -> Node: - """Create node performing a batched-convolution backprop data operation. + """! Create node performing a batched-convolution backprop data operation. - :param data: The node producing data from forward-prop - :param filters: The node producing the filters from forward-prop. - :param output_shape: The node producing output delta. - :param strides: The distance (in pixels) to slide the filter on the feature map + @param data: The node producing data from forward-prop + @param filters: The node producing the filters from forward-prop. + @param output_shape: The node producing output delta. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. 
- :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. - :param name: The node name. + @param name: The node name. - :returns: The node object representing ConvolutionBackpropData operation. + @return The node object representing ConvolutionBackpropData operation. """ spatial_dim_count = len(strides) if pads_begin is None: @@ -469,22 +469,22 @@ def convolution_backprop_data( @unary_op def cos(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply cosine function on the input node element-wise. + """! Apply cosine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with cos operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with cos operation applied on it. """ return _get_node_factory_opset1().create("Cos", [node]) @unary_op def cosh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic cosine function on the input node element-wise. + """! Apply hyperbolic cosine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with cosh operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with cosh operation applied on it. """ return _get_node_factory_opset1().create("Cosh", [node]) @@ -503,20 +503,20 @@ def deformable_convolution( deformable_group: int = 1, name: Optional[str] = None, ) -> Node: - """Create node performing deformable convolution. - - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param group: The number of groups which both output and input should be split into. - :param deformable_group: The number of groups which deformable values and output should be split + """! Create node performing deformable convolution. + + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. 
+ @param group: The number of groups which both output and input should be split into. + @param deformable_group: The number of groups which deformable values and output should be split into along the channel axis. - :param name: The optional new name for output node. - :return: New node performing deformable convolution operation. + @param name: The optional new name for output node. + @return New node performing deformable convolution operation. """ return _get_node_factory_opset1().create( "DeformableConvolution", @@ -548,24 +548,24 @@ def deformable_psroi_pooling( offsets: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return node performing DeformablePSROIPooling operation. + """! Return node performing DeformablePSROIPooling operation. DeformablePSROIPooling computes position-sensitive pooling on regions of interest specified by input. - :param feature_maps: 4D tensor with feature maps. - :param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2]. - :param output_dim: A pooled output channel number. - :param spatial_scale: A multiplicative spatial scale factor to translate ROI. - :param group_size: The number of groups to encode position-sensitive score. - :param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable']. - :param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width. - :param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height. - :param trans_std: The value that all transformation (offset) values are multiplied with. - :param part_size: The number of parts the output tensor spatial dimensions are divided into. - :param offsets: Optional node. 4D input blob with transformation values (offsets). - :param name: The optional new name for output node. - :return: New node performing DeformablePSROIPooling operation. + @param feature_maps: 4D tensor with feature maps. + @param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2]. + @param output_dim: A pooled output channel number. + @param spatial_scale: A multiplicative spatial scale factor to translate ROI. + @param group_size: The number of groups to encode position-sensitive score. + @param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable']. + @param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width. + @param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height. + @param trans_std: The value that all transformation (offset) values are multiplied with. + @param part_size: The number of parts the output tensor spatial dimensions are divided into. + @param offsets: Optional node. 4D input blob with transformation values (offsets). + @param name: The optional new name for output node. + @return New node performing DeformablePSROIPooling operation. """ node_inputs = as_nodes(feature_maps, coords) if offsets is not None: @@ -589,7 +589,7 @@ def deformable_psroi_pooling( @nameable_op def depth_to_space(node: Node, mode: str, block_size: int = 1, name: str = None) -> Node: - """Rearranges input tensor from depth into blocks of spatial data. + """! Rearranges input tensor from depth into blocks of spatial data. Values from the height and width dimensions are moved to the depth dimension. 
@@ -598,18 +598,18 @@ def depth_to_space(node: Node, mode: str, block_size: int = 1, name: str = None) Output node produces a tensor with shape: - [N, C * :code:`block_size` * :code:`block_size`, H / :code:`block_size`, W / :code:`block_size`] + [N, C * `block_size` * `block_size`, H / `block_size`, W / `block_size`] - :param node: The node with input tensor data. - :param mode: Specifies how the input depth dimension is split to block coordinates + @param node: The node with input tensor data. + @param mode: Specifies how the input depth dimension is split to block coordinates blocks_first: The input is divided to [block_size, ..., block_size, new_depth] depth_first: The input is divided to [new_depth, block_size, ..., block_size] - :param block_size: The size of the spatial block of values describing + @param block_size: The size of the spatial block of values describing how the tensor's data is to be rearranged. - :param name: Optional output node name. - :return: The new node performing an DepthToSpace operation on its input tensor. + @param name: Optional output node name. + @return The new node performing an DepthToSpace operation on its input tensor. """ return _get_node_factory_opset1().create( "DepthToSpace", [node], {"mode": mode, "block_size": block_size}, @@ -626,15 +626,15 @@ def detection_output( aux_box_preds: NodeInput = None, name: Optional[str] = None, ) -> Node: - """Generate the detection output using information on location and confidence predictions. + """! Generate the detection output using information on location and confidence predictions. - :param box_logits: The 2D input tensor with box logits. - :param class_preds: The 2D input tensor with class predictions. - :param proposals: The 3D input tensor with proposals. - :param attrs: The dictionary containing key, value pairs for attributes. - :param aux_class_preds: The 2D input tensor with additional class predictions information. - :param aux_box_preds: The 2D input tensor with additional box predictions information. - :param name: Optional name for the output node. + @param box_logits: The 2D input tensor with box logits. + @param class_preds: The 2D input tensor with class predictions. + @param proposals: The 3D input tensor with proposals. + @param attrs: The dictionary containing key, value pairs for attributes. + @param aux_class_preds: The 2D input tensor with additional class predictions information. + @param aux_box_preds: The 2D input tensor with additional box predictions information. + @param name: Optional name for the output node. Available attributes are: @@ -726,8 +726,7 @@ def detection_output( Required: no Example of attribute dictionary: - .. code-block:: python - + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.py} # just required ones attrs = { 'num_classes': 85, @@ -744,10 +743,11 @@ def detection_output( 'input_height': [32], 'input_width': [32], } + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing DetectionOutput operation. + @return Node representing DetectionOutput operation. """ requirements = [ ("num_classes", True, np.integer, is_positive_value), @@ -786,13 +786,13 @@ def divide( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies f(x) = A/B to the input nodes element-wise. + """! Return node which applies f(x) = A/B to the input nodes element-wise. - :param left_node: The node providing dividend data. 
- :param right_node: The node providing divisor data. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise division. + @param left_node: The node providing dividend data. + @param right_node: The node providing divisor data. + @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. + @param name: Optional name for output node. + @return The node performing element-wise division. """ return _get_node_factory_opset1().create( "Divide", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -801,18 +801,17 @@ def divide( @nameable_op def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node: - """Perform Exponential Linear Unit operation element-wise on data from input node. + """! Perform Exponential Linear Unit operation element-wise on data from input node. Computes exponential linear: alpha * (exp(data) - 1) if < 0, data otherwise. For more information refer to: - `Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - `_ + [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/abs/1511.07289) - :param data: Input tensor. One of: input node, array or scalar. - :param alpha: Scalar multiplier for negative values. - :param name: Optional output node name. - :return: The new node performing an ELU operation on its input data element-wise. + @param data: Input tensor. One of: input node, array or scalar. + @param alpha: Scalar multiplier for negative values. + @param name: Optional output node name. + @return The new node performing an ELU operation on its input data element-wise. """ return _get_node_factory_opset1().create("Elu", [as_node(data)], {"alpha": alpha}) @@ -824,14 +823,14 @@ def equal( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if input nodes are equal element-wise. + """! Return node which checks if input nodes are equal element-wise. - :param left_node: The first input node for equal operation. - :param right_node: The second input node for equal operation. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node for equal operation. + @param right_node: The second input node for equal operation. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional name for output new node. - :return: The node performing element-wise equality check. + @param name: The optional name for output new node. + @return The node performing element-wise equality check. """ return _get_node_factory_opset1().create( "Equal", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -840,22 +839,22 @@ def equal( @unary_op def erf(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which calculates Gauss error function element-wise with given tensor. + """! Return node which calculates Gauss error function element-wise with given tensor. - :param node: The node providing data for operation. - :param name: The optional name for new output node. - :return: The new node performing element-wise Erf operation. + @param node: The node providing data for operation. + @param name: The optional name for new output node. + @return The new node performing element-wise Erf operation. 
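A short sketch of the element-wise factory functions documented above (again assuming the `import ngraph as ng` alias; the input values are illustrative only):

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng  # assumed top-level alias

a = ng.constant(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
b = ng.constant(np.array([2.0], dtype=np.float32))

quotient = ng.divide(a, b)                     # "NUMPY" auto-broadcasting by default
mask = ng.equal(a, b, auto_broadcast="NUMPY")  # element-wise comparison
act = ng.elu(a, alpha=1.0)                     # alpha * (exp(x) - 1) for x < 0
err = ng.erf(a)                                # Gauss error function, element-wise
~~~~~~~~~~~~~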
""" return _get_node_factory_opset1().create("Erf", [node]) @unary_op def exp(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies exponential function to the input node element-wise. + """! Return node which applies exponential function to the input node element-wise. - :param node: The node providing data for operation. - :param name: The optional name for new output node. - :return: The new node performing natural exponential operation. + @param node: The node providing data for operation. + @param name: The optional name for new output node. + @return The new node performing natural exponential operation. """ return _get_node_factory_opset1().create("Exp", [node]) @@ -871,33 +870,35 @@ def fake_quantize( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - r"""Perform an element-wise linear quantization on input data. + r"""! Perform an element-wise linear quantization on input data. Input floating point values are quantized into a discrete set of floating point values. - .. code-block:: python + ~~~~~~~~~~~~~{.py} if x <= input_low: output = output_low if x > input_high: output = output_high else: output = fake_quantize(output) + ~~~~~~~~~~~~~ Fake quantize uses the following logic: - .. math:: output = + \f[ output = \dfrac{round( \dfrac{data - input\_low}{(input\_high - input\_low)\cdot (levels-1)})} {(levels-1)\cdot (output\_high - output\_low)} + output\_low - - :param data: The node with data tensor. - :param input_low: The node with the minimum for input values. - :param input_high: The node with the maximum for input values. - :param output_low: The node with the minimum quantized value. - :param output_high: The node with the maximum quantized value. - :param levels: The number of quantization levels. Integer value. - :param auto_broadcast: The type of broadcasting specifies rules used for + \f] + + @param data: The node with data tensor. + @param input_low: The node with the minimum for input values. + @param input_high: The node with the maximum for input values. + @param output_low: The node with the minimum quantized value. + @param output_high: The node with the maximum quantized value. + @param levels: The number of quantization levels. Integer value. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :return: New node with quantized value. + @return New node with quantized value. """ return _get_node_factory_opset1().create( "FakeQuantize", @@ -908,11 +909,11 @@ def fake_quantize( @unary_op def floor(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies floor to the input node element-wise. + """! Return node which applies floor to the input node element-wise. - :param node: The input node providing data. - :param name: The optional name for new output node. - :return: The node performing element-wise floor operation. + @param node: The input node providing data. + @param name: The optional name for new output node. + @return The node performing element-wise floor operation. """ return _get_node_factory_opset1().create("Floor", [node]) @@ -924,13 +925,13 @@ def floor_mod( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node performing element-wise FloorMod (division reminder) with two given tensors. + """! Return node performing element-wise FloorMod (division reminder) with two given tensors. - :param left_node: The first input node for FloorMod operation. 
- :param right_node: The second input node for FloorMod operation. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise FloorMod operation. + @param left_node: The first input node for FloorMod operation. + @param right_node: The second input node for FloorMod operation. + @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. + @param name: Optional name for output node. + @return The node performing element-wise FloorMod operation. """ return _get_node_factory_opset1().create( "FloorMod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -941,13 +942,13 @@ def floor_mod( def gather( data: NodeInput, indices: NodeInput, axis: NodeInput, name: Optional[str] = None ) -> Node: - """Return Gather node which takes slices from axis of data according to indices. + """! Return Gather node which takes slices from axis of data according to indices. - :param data: The tensor from which slices are gathered. - :param indices: Tensor with indexes to gather. - :param axis: The dimension index to gather data from. - :param name: Optional name for output node. - :return: The new node performing a Gather operation on the data input tensor. + @param data: The tensor from which slices are gathered. + @param indices: Tensor with indexes to gather. + @param axis: The dimension index to gather data from. + @param name: Optional name for output node. + @return The new node performing a Gather operation on the data input tensor. """ node_inputs = as_nodes(data, indices, axis) return _get_node_factory_opset1().create("Gather", node_inputs) @@ -961,14 +962,13 @@ def gather_tree( end_token: NodeInput, name: Optional[str] = None, ) -> Node: - """Perform GatherTree operation. + """! Perform GatherTree operation. The GatherTree node generates the complete beams from the indices per each step and the parent beam indices. GatherTree uses the following logic: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} for batch in range(BATCH_SIZE): for beam in range(BEAM_WIDTH): max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch]) @@ -979,14 +979,15 @@ def gather_tree( final_idx[level, batch, beam] = step_idx[level, batch, parent] parent = parent_idx[level, batch, parent] + ~~~~~~~~~~~~~ - :param step_ids: The tensor with indices from per each step. - :param parent_idx: The tensor with with parent beam indices. - :param max_seq_len: The tensor with maximum lengths for each sequence in the batch. - :param end_token: The scalar tensor with value of the end marker in a sequence. - :param name: Optional name for output node. - :return: The new node performing a GatherTree operation. + @param step_ids: The tensor with indices from per each step. + @param parent_idx: The tensor with with parent beam indices. + @param max_seq_len: The tensor with maximum lengths for each sequence in the batch. + @param end_token: The scalar tensor with value of the end marker in a sequence. + @param name: Optional name for output node. + @return The new node performing a GatherTree operation. """ node_inputs = as_nodes(step_ids, parent_idx, max_seq_len, end_token) return _get_node_factory_opset1().create("GatherTree", node_inputs) @@ -999,14 +1000,14 @@ def greater( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if left input node is greater than the right node element-wise. + """! 
Return node which checks if left input node is greater than the right node element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is greater than right_node. + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is greater than right_node. """ return _get_node_factory_opset1().create( "Greater", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1020,14 +1021,14 @@ def greater_equal( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if left node is greater or equal to the right node element-wise. + """! Return node which checks if left node is greater or equal to the right node element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is greater than or equal + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is greater than or equal right_node. """ return _get_node_factory_opset1().create( @@ -1036,16 +1037,16 @@ def greater_equal( def grn(data: Node, bias: float, name: Optional[str] = None) -> Node: - r"""Perform Global Response Normalization with L2 norm (across channels only). + r"""! Perform Global Response Normalization with L2 norm (across channels only). Computes GRN operation on channels for input tensor: - .. math:: output_i = \dfrac{input_i}{\sqrt{\sum_{i}^{C} input_i}} + \f[ output_i = \dfrac{input_i}{\sqrt{\sum_{i}^{C} input_i}} \f] - :param data: The node with data tensor. - :param bias: The bias added to the variance. Scalar value. - :param name: Optional output node name. - :return: The new node performing a GRN operation on tensor's channels. + @param data: The node with data tensor. + @param bias: The bias added to the variance. Scalar value. + @param name: Optional output node name. + @return The new node performing a GRN operation on tensor's channels. """ return _get_node_factory_opset1().create("GRN", [data], {"bias": bias}) @@ -1061,16 +1062,16 @@ def group_convolution( auto_pad: str = "EXPLICIT", name: Optional[str] = None, ) -> Node: - """Perform Group Convolution operation on data from input node. + """! Perform Group Convolution operation on data from input node. - :param data: The node producing input data. - :param filters: The node producing filters data. - :param strides: The distance (in pixels) to slide the filter on the feature map + @param data: The node producing input data. + @param filters: The node producing filters data. 
+ @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param auto_pad: Describes how to perform padding. Possible values: + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. + @param auto_pad: Describes how to perform padding. Possible values: EXPLICIT: Pad dimensions are explicity specified SAME_LOWER: Pad dimensions computed to match input shape Ceil(num_dims/2) at the beginning and @@ -1079,8 +1080,8 @@ def group_convolution( Floor(num_dims/2) at the beginning and Ceil(num_dims/2) at the end VALID: No padding - :param name: Optional output node name. - :return: The new node performing a Group Convolution operation on tensor from input node. + @param name: Optional output node name. + @return The new node performing a Group Convolution operation on tensor from input node. """ return _get_node_factory_opset1().create( "GroupConvolution", @@ -1108,18 +1109,18 @@ def group_convolution_backprop_data( output_padding: Optional[List[int]] = None, name: Optional[str] = None, ) -> Node: - """Perform Group Convolution operation on data from input node. + """! Perform Group Convolution operation on data from input node. - :param data: The node producing input data. - :param filters: The node producing filter data. - :param strides: The distance (in pixels) to slide the filter on the feature map + @param data: The node producing input data. + @param filters: The node producing filter data. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param output_shape: The node that specifies spatial shape of the output. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param dilations: The distance in width and height between elements (weights) + @param output_shape: The node that specifies spatial shape of the output. + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. - :param auto_pad: Describes how to perform padding. Possible values: + @param auto_pad: Describes how to perform padding. Possible values: EXPLICIT: Pad dimensions are explicity specified SAME_LOWER: Pad dimensions computed to match input shape Ceil(num_dims/2) at the beginning and @@ -1128,10 +1129,10 @@ def group_convolution_backprop_data( Floor(num_dims/2) at the beginning and Ceil(num_dims/2) at the end VALID: No padding - :param output_padding: The additional amount of paddings added per each spatial axis + @param output_padding: The additional amount of paddings added per each spatial axis in the output tensor. - :param name: Optional output node name. - :return: The new node performing a Group Convolution operation on tensor from input node. + @param name: Optional output node name. + @return The new node performing a Group Convolution operation on tensor from input node. 
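For orientation, a minimal sketch of a grouped convolution call matching the signature above (the `ng` alias, the [GROUPS, C_OUT, C_IN, kH, kW] filter layout shown here, and the concrete shapes are assumptions for illustration only):

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng  # assumed top-level alias

# 2 groups over 4 input channels; filters laid out per group as [GROUPS, C_OUT, C_IN, kH, kW].
data = ng.constant(np.random.uniform(0, 1, (1, 4, 5, 5)).astype(np.float32))
filters = ng.constant(np.random.uniform(0, 1, (2, 2, 2, 3, 3)).astype(np.float32))

conv = ng.group_convolution(
    data,
    filters,
    strides=[1, 1],
    pads_begin=[1, 1],
    pads_end=[1, 1],
    dilations=[1, 1],
    auto_pad="EXPLICIT",
)
~~~~~~~~~~~~~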
""" spatial_dim_count = len(strides) if dilations is None: @@ -1162,19 +1163,19 @@ def group_convolution_backprop_data( @nameable_op def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[str] = None) -> Node: - """Perform Hard Sigmoid operation element-wise on data from input node. + """! Perform Hard Sigmoid operation element-wise on data from input node. Hard Sigmoid uses the following logic: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} y = max(0, min(1, alpha * data + beta)) + ~~~~~~~~~~~~~ - :param data: The node with data tensor. - :param alpha: A node producing the alpha parameter. - :param beta: A node producing the beta parameter - :param name: Optional output node name. - :return: The new node performing a Hard Sigmoid element-wise on input tensor. + @param data: The node with data tensor. + @param alpha: A node producing the alpha parameter. + @param beta: A node producing the beta parameter + @param name: Optional output node name. + @return The new node performing a Hard Sigmoid element-wise on input tensor. """ return _get_node_factory_opset1().create("HardSigmoid", [data, as_node(alpha), as_node(beta)]) @@ -1183,12 +1184,12 @@ def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[s def interpolate( image: Node, output_shape: NodeInput, attrs: dict, name: Optional[str] = None ) -> Node: - """Perform interpolation of independent slices in input tensor. + """! Perform interpolation of independent slices in input tensor. - :param image: The node providing input tensor with data for interpolation. - :param output_shape: 1D tensor describing output shape for spatial axes. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. + @param image: The node providing input tensor with data for interpolation. + @param output_shape: 1D tensor describing output shape for spatial axes. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. Available attributes are: @@ -1223,8 +1224,7 @@ def interpolate( Required: no Example of attribute dictionary: - .. code-block:: python - + ~~~~~~~~~~~~~ # just required ones attrs = { 'axes': [2, 3], @@ -1237,10 +1237,10 @@ def interpolate( 'antialias': True, 'pads_begin': [2, 2, 2], } - + ~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing interpolation operation. + @return Node representing interpolation operation. """ requirements = [ ("axes", True, np.integer, is_non_negative_value), @@ -1263,14 +1263,14 @@ def less( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if left input node is less than the right node element-wise. + """! Return node which checks if left input node is less than the right node element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is less than the right_node. + @param name: The optional new name for output node. 
+ @return The node performing element-wise check whether left_node is less than the right_node. """ return _get_node_factory_opset1().create( "Less", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1284,14 +1284,14 @@ def less_equal( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if left input node is less or equal the right node element-wise. + """! Return node which checks if left input node is less or equal the right node element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is less than or equal the + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is less than or equal the right_node. """ return _get_node_factory_opset1().create( @@ -1301,11 +1301,11 @@ def less_equal( @unary_op def log(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies natural logarithm to the input node element-wise. + """! Return node which applies natural logarithm to the input node element-wise. - :param node: The input node providing data for operation. - :param name: The optional new name for output node. - :return: The new node performing log operation element-wise. + @param node: The input node providing data for operation. + @param name: The optional new name for output node. + @return The new node performing log operation element-wise. """ return _get_node_factory_opset1().create("Log", [node]) @@ -1317,14 +1317,14 @@ def logical_and( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which perform logical and operation on input nodes element-wise. + """! Return node which perform logical and operation on input nodes element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical and operation on input nodes corresponding elements. + @param name: The optional new name for output node. + @return The node performing logical and operation on input nodes corresponding elements. """ return _get_node_factory_opset1().create( "LogicalAnd", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1333,11 +1333,11 @@ def logical_and( @unary_op def logical_not(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies element-wise logical negation to the input node. + """! Return node which applies element-wise logical negation to the input node. - :param node: The input node providing data. - :param name: The optional new name for output node. 
- :return: The node performing element-wise logical NOT operation with given tensor. + @param node: The input node providing data. + @param name: The optional new name for output node. + @return The node performing element-wise logical NOT operation with given tensor. """ return _get_node_factory_opset1().create("LogicalNot", [node]) @@ -1349,14 +1349,14 @@ def logical_or( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which performs logical OR operation on input nodes element-wise. + """! Return node which performs logical OR operation on input nodes element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical or operation on input nodes corresponding elements. + @param name: The optional new name for output node. + @return The node performing logical or operation on input nodes corresponding elements. """ return _get_node_factory_opset1().create( "LogicalOr", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1370,14 +1370,14 @@ def logical_xor( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which performs logical XOR operation on input nodes element-wise. + """! Return node which performs logical XOR operation on input nodes element-wise. - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical or operation on input nodes corresponding elements. + @param name: The optional new name for output node. + @return The node performing logical or operation on input nodes corresponding elements. """ return _get_node_factory_opset1().create( "LogicalXor", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1394,15 +1394,15 @@ def lrn( size: int = 5, name: Optional[str] = None, ) -> Node: - """Return a node which performs element-wise Local Response Normalization (LRN) operation. + """! Return a node which performs element-wise Local Response Normalization (LRN) operation. - :param data: Input data. - :param alpha: A scale factor (usually positive). - :param beta: An exponent. - :param bias: An offset (usually positive) to avoid dividing by 0. - :param size: Width of the 1-D normalization window. - :param name: An optional name of the output node. - :return: The new node which performs LRN. + @param data: Input data. + @param alpha: A scale factor (usually positive). + @param beta: An exponent. + @param bias: An offset (usually positive) to avoid dividing by 0. + @param size: Width of the 1-D normalization window. + @param name: An optional name of the output node. 
+ @return The new node which performs LRN. """ attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size} return _get_node_factory_opset1().create("LRN", as_nodes(data, axes), attributes) @@ -1423,22 +1423,22 @@ def lstm_cell( clip: float = 0.0, name: Optional[str] = None, ) -> Node: - """Return a node which performs LSTMCell operation. + """! Return a node which performs LSTMCell operation. - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. - :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. - :param W: The weight tensor with shape: [4*hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. - :param B: The bias tensor for gates with shape: [4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. + @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. + @param W: The weight tensor with shape: [4*hidden_size, input_size]. + @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. + @param B: The bias tensor for gates with shape: [4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. - :return: The new node represents LSTMCell. Node outputs count: 2. + @return The new node represents LSTMCell. Node outputs count: 2. """ if activations is None: activations = ["sigmoid", "tanh", "tanh"] @@ -1489,30 +1489,30 @@ def lstm_sequence( clip: float = 0.0, name: Optional[str] = None, ) -> Node: - """Return a node which performs LSTMSequence operation. + """! Return a node which performs LSTMSequence operation. - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. - :param initial_hidden_state: The hidden state tensor. + @param X: The input tensor. Shape: [batch_size, seq_length, input_size]. + @param initial_hidden_state: The hidden state tensor. Shape: [batch_size, num_directions, hidden_size]. - :param initial_cell_state: The cell state tensor. + @param initial_cell_state: The cell state tensor. Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. + @param sequence_lengths: Specifies real sequence lengths for each batch element. Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. + @param W: Tensor with weights for matrix multiplication operation with input portion of data. 
Shape: [num_directions, 4*hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. + @param R: The tensor with weights for matrix multiplication operation with hidden state. Shape: [num_directions, 4*hidden_size, input_size]. - :param B: The tensor with biases. + @param B: The tensor with biases. Shape: [num_directions, 4*hidden_size, hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. + @param hidden_size: Specifies hidden state size. + @param direction: Specifies if the RNN is forward, reverse, or bidirectional. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. - :return: The new node represents LSTMSequence. Node outputs count: 3. + @return The new node represents LSTMSequence. Node outputs count: 3. """ if activations is None: activations = ["sigmoid", "tanh", "tanh"] @@ -1559,13 +1559,13 @@ def matmul( transpose_b: bool, name: Optional[str] = None, ) -> Node: - """Return the Matrix Multiplication operation. + """! Return the Matrix Multiplication operation. - :param data_a: left-hand side matrix - :param data_b: right-hand side matrix - :param transpose_a: should the first matrix be transposed before operation - :param transpose_b: should the second matrix be transposed - :return: MatMul operation node + @param data_a: left-hand side matrix + @param data_b: right-hand side matrix + @param transpose_a: should the first matrix be transposed before operation + @param transpose_b: should the second matrix be transposed + @return MatMul operation node """ print("transpose_a", transpose_a, "transpose_b", transpose_b) return _get_node_factory_opset1().create( @@ -1584,21 +1584,21 @@ def max_pool( auto_pad: Optional[str] = None, name: Optional[str] = None, ) -> Node: - """Perform max pooling operation with given parameters on provided data. + """! Perform max pooling operation with given parameters on provided data. - :param data: The node providing input data. - :param strides: The distance (in pixels) to slide the filter on the feature map + @param data: The node providing input data. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param kernel_shape: The pooling operation kernel shape. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param kernel_shape: The pooling operation kernel shape. 
+ @param rounding_type: Determines used rounding schema when computing output shape. Acceptable values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: + @param auto_pad: Determines how the padding is calculated. Acceptable values: [None, 'same_upper', 'same_lower', 'valid'] - :param name: The optional name for the created output node. + @param name: The optional name for the created output node. - :returns: The new node performing max pooling operation. + @return The new node performing max pooling operation. """ if auto_pad is None: auto_pad = "explicit" @@ -1623,7 +1623,7 @@ def maximum( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies the maximum operation to input nodes elementwise.""" + """! Return node which applies the maximum operation to input nodes elementwise.""" return _get_node_factory_opset1().create( "Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} ) @@ -1636,7 +1636,7 @@ def minimum( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies the minimum operation to input nodes elementwise.""" + """! Return node which applies the minimum operation to input nodes elementwise.""" return _get_node_factory_opset1().create( "Minimum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} ) @@ -1649,13 +1649,13 @@ def mod( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node performing element-wise division reminder with two given tensors. + """! Return node performing element-wise division reminder with two given tensors. - :param left_node: The first input node for mod operation. - :param right_node: The second input node for mod operation. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise Mod operation. + @param left_node: The first input node for mod operation. + @param right_node: The second input node for mod operation. + @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. + @param name: Optional name for output node. + @return The node performing element-wise Mod operation. """ return _get_node_factory_opset1().create( "Mod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1669,7 +1669,7 @@ def multiply( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies f(x) = A*B to the input nodes elementwise.""" + """! Return node which applies f(x) = A*B to the input nodes elementwise.""" return _get_node_factory_opset1().create( "Multiply", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} ) @@ -1677,7 +1677,7 @@ def multiply( @unary_op def negative(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies f(x) = -x to the input node elementwise.""" + """! Return node which applies f(x) = -x to the input node elementwise.""" return _get_node_factory_opset1().create("Negative", [node]) @@ -1692,18 +1692,18 @@ def non_max_suppression( sort_result_descending: bool = True, name: Optional[str] = None, ) -> Node: - """Return a node which performs NonMaxSuppression. + """! Return a node which performs NonMaxSuppression. - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. 
- :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. Range of values: corner or cente. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param box_encoding: Format of boxes data encoding. Range of values: corner or cente. + @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected boxes across batches or not. - :return: The new node which performs NonMaxSuppression + @return The new node which performs NonMaxSuppression """ if max_output_boxes_per_class is None: max_output_boxes_per_class = make_constant_node(0, np.int64) @@ -1725,13 +1725,13 @@ def non_max_suppression( def normalize_l2( data: NodeInput, axes: NodeInput, eps: float, eps_mode: str, name: Optional[str] = None ) -> Node: - """Construct an NormalizeL2 operation. + """! Construct an NormalizeL2 operation. - :param data: Node producing the input tensor - :param axes: Node indicating axes along which L2 reduction is calculated - :param eps: The epsilon added to L2 norm - :param eps_mode: how eps is combined with L2 value (`add` or `max`) - :return: New node which performs the L2 normalization. + @param data: Node producing the input tensor + @param axes: Node indicating axes along which L2 reduction is calculated + @param eps: The epsilon added to L2 norm + @param eps_mode: how eps is combined with L2 value (`add` or `max`) + @return New node which performs the L2 normalization. """ return _get_node_factory_opset1().create( "NormalizeL2", as_nodes(data, axes), {"eps": eps, "mode": eps_mode} @@ -1745,14 +1745,14 @@ def not_equal( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which checks if input nodes are unequal element-wise. + """! Return node which checks if input nodes are unequal element-wise. - :param left_node: The first input node for not-equal operation. - :param right_node: The second input node for not-equal operation. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The first input node for not-equal operation. + @param right_node: The second input node for not-equal operation. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :param name: The optional name for output new node. - :return: The node performing element-wise inequality check. + @param name: The optional name for output new node. + @return The node performing element-wise inequality check. """ return _get_node_factory_opset1().create( "NotEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1768,18 +1768,18 @@ def one_hot( axis: int, name: Optional[str] = None, ) -> Node: - """Create node performing one-hot encoding on input data. + """! Create node performing one-hot encoding on input data. - :param indices: Input tensor of rank N with indices of any supported integer data type. 
- :param depth: Scalar of any supported integer type that specifies number of classes and + @param indices: Input tensor of rank N with indices of any supported integer data type. + @param depth: Scalar of any supported integer type that specifies number of classes and the size of one-hot dimension. - :param on_value: Scalar of any type that is the value that the locations + @param on_value: Scalar of any type that is the value that the locations in output tensor represented by indices in input take. - :param off_value: Scalar of any type that is the value that the locations not represented + @param off_value: Scalar of any type that is the value that the locations not represented by indices in input take. - :param name: The optional name for new output node. - :return: New node performing one-hot operation. + @param name: The optional name for new output node. + @return New node performing one-hot operation. """ return _get_node_factory_opset1().create( "OneHot", as_nodes(indices, depth, on_value, off_value), {"axis": axis} @@ -1795,15 +1795,15 @@ def pad( arg_pad_value: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return a generic padding operation. + """! Return a generic padding operation. - :param arg: The node producing input tensor to be padded. - :param pads_begin: number of padding elements to be added before position 0 + @param arg: The node producing input tensor to be padded. + @param pads_begin: number of padding elements to be added before position 0 on each axis of arg. - :param pads_end: number of padding elements to be added after the last element. - :param pad_mode: "constant", "edge", "reflect" or "symmetric" - :param arg_pad_value: value used for padding if pad_mode is "constant" - :return: Pad operation node. + @param pads_end: number of padding elements to be added after the last element. + @param pad_mode: "constant", "edge", "reflect" or "symmetric" + @param arg_pad_value: value used for padding if pad_mode is "constant" + @return Pad operation node. """ input_nodes = as_nodes(arg, pads_begin, pads_end) if arg_pad_value: @@ -1817,7 +1817,7 @@ def pad( def parameter( shape: TensorShape, dtype: NumericType = np.float32, name: Optional[str] = None ) -> Parameter: - """Return an ngraph Parameter object.""" + """! Return an ngraph Parameter object.""" element_type = get_element_type(dtype) return Parameter(element_type, PartialShape(shape)) @@ -1829,14 +1829,14 @@ def power( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which perform element-wise exponentiation operation. + """! Return node which perform element-wise exponentiation operation. - :param left_node: The node providing the base of operation. - :param right_node: The node providing the exponent of operation. - :param name: The optional name for the new output node. - :param auto_broadcast: The type of broadcasting specifies rules used for + @param left_node: The node providing the base of operation. + @param right_node: The node providing the exponent of operation. + @param name: The optional name for the new output node. + @param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. - :return: The new node performing element-wise exponentiation operation on input nodes. + @return The new node performing element-wise exponentiation operation on input nodes. 
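+
+    A minimal usage sketch (illustrative only; assumes this module is imported
+    as `ng`, and the parameter node below is hypothetical):
+    ~~~~~~~~~~~~~{.py}
+    base = ng.parameter([2, 2], name="base")
+    node = ng.power(base, 2.0)   # element-wise base ** 2
+    ~~~~~~~~~~~~~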
""" return _get_node_factory_opset1().create( "Power", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -1845,21 +1845,21 @@ def power( @nameable_op def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node: - """Perform Parametrized Relu operation element-wise on data from input node. + """! Perform Parametrized Relu operation element-wise on data from input node. PRelu uses the following logic: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} if data < 0: data = data * slope elif data >= 0: data = data + ~~~~~~~~~~~~~ - :param data: The node with data tensor. - :param slope: The node with the multipliers for negative values. - :param name: Optional output node name. - :return: The new node performing a PRelu operation on tensor's channels. + @param data: The node with data tensor. + @param slope: The node with the multipliers for negative values. + @param name: Optional output node name. + @return The new node performing a PRelu operation on tensor's channels. """ return _get_node_factory_opset1().create("PRelu", as_nodes(data, slope)) @@ -1868,14 +1868,14 @@ def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node def prior_box_clustered( output_size: Node, image_size: NodeInput, attrs: dict, name: Optional[str] = None ) -> Node: - """Generate prior boxes of specified sizes normalized to the input image size. + """! Generate prior boxes of specified sizes normalized to the input image size. - :param output_size: 1D tensor with two integer elements [height, width]. Specifies the + @param output_size: 1D tensor with two integer elements [height, width]. Specifies the spatial size of generated grid with boxes. - :param image_size: 1D tensor with two integer elements [image_height, image_width] that + @param image_size: 1D tensor with two integer elements [image_height, image_width] that specifies shape of the image for which boxes are generated. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. Available attributes are: @@ -1916,8 +1916,7 @@ def prior_box_clustered( Required: no Example of attribute dictionary: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} # just required ones attrs = { 'offset': 85, @@ -1928,10 +1927,11 @@ def prior_box_clustered( 'clip': False, 'step_widths': [1.5, 2.0, 2.5] } + ~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing PriorBoxClustered operation. + @return Node representing PriorBoxClustered operation. """ requirements = [ ("widths", False, np.floating, is_positive_value), @@ -1954,12 +1954,12 @@ def prior_box_clustered( def prior_box( layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None ) -> Node: - """Generate prior boxes of specified sizes and aspect ratios across all dimensions. + """! Generate prior boxes of specified sizes and aspect ratios across all dimensions. - :param layer_shape: Shape of layer for which prior boxes are computed. - :param image_shape: Shape of image to which prior boxes are scaled. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. + @param layer_shape: Shape of layer for which prior boxes are computed. + @param image_shape: Shape of image to which prior boxes are scaled. 
+ @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. Available attributes are: @@ -2027,8 +2027,7 @@ def prior_box( Required: no Example of attribute dictionary: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} # just required ones attrs = { 'offset': 85, @@ -2040,10 +2039,11 @@ def prior_box( 'clip': True, 'fixed_size': [32, 64, 128] } + ~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing prior box operation. + @return Node representing prior box operation. """ requirements = [ ("offset", True, np.floating, is_non_negative_value), @@ -2073,13 +2073,13 @@ def proposal( attrs: dict, name: Optional[str] = None, ) -> Node: - """Filter bounding boxes and outputs only those with the highest prediction confidence. + """! Filter bounding boxes and outputs only those with the highest prediction confidence. - :param class_probs: 4D input floating point tensor with class prediction scores. - :param bbox_deltas: 4D input floating point tensor with box logits. - :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. + @param class_probs: 4D input floating point tensor with class prediction scores. + @param bbox_deltas: 4D input floating point tensor with box logits. + @param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. * base_size The size of the anchor to which scale and ratio attributes are applied. Range of values: a positive unsigned integer number @@ -2159,8 +2159,7 @@ def proposal( Example of attribute dictionary: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} # just required ones attrs = { 'base_size': 85, @@ -2172,10 +2171,11 @@ def proposal( 'ratio': [0.1, 1.5, 2.0, 2.5], 'scale': [2, 3, 3, 4], } + ~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing Proposal operation. + @return Node representing Proposal operation. """ requirements = [ ("base_size", True, np.unsignedinteger, is_positive_value), @@ -2213,17 +2213,17 @@ def psroi_pooling( mode: str, name: Optional[str] = None, ) -> Node: - """Return a node which produces a PSROIPooling operation. + """! Return a node which produces a PSROIPooling operation. 
- :param input: Input feature map {N, C, ...} - :param coords: Coordinates of bounding boxes - :param output_dim: Output channel number - :param group_size: Number of groups to encode position-sensitive scores - :param spatial_scale: Ratio of input feature map over input image size - :param spatial_bins_x: Numbers of bins to divide the input feature maps over - :param spatial_bins_y: Numbers of bins to divide the input feature maps over - :param mode: Mode of pooling - "avg" or "bilinear" - :return: PSROIPooling node + @param input: Input feature map {N, C, ...} + @param coords: Coordinates of bounding boxes + @param output_dim: Output channel number + @param group_size: Number of groups to encode position-sensitive scores + @param spatial_scale: Ratio of input feature map over input image size + @param spatial_bins_x: Numbers of bins to divide the input feature maps over + @param spatial_bins_y: Numbers of bins to divide the input feature maps over + @param mode: Mode of pooling - "avg" or "bilinear" + @return PSROIPooling node """ mode = mode.lower() return _get_node_factory_opset1().create( @@ -2242,24 +2242,24 @@ def psroi_pooling( @nameable_op def range(start: Node, stop: NodeInput, step: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces the Range operation. + """! Return a node which produces the Range operation. - :param start: The start value of the generated range - :param stop: The stop value of the generated range - :param step: The step value for the generated range - :param name: Optional name for output node. - :return: Range node + @param start: The start value of the generated range + @param stop: The stop value of the generated range + @param step: The step value for the generated range + @param name: Optional name for output node. + @return Range node """ return _get_node_factory_opset1().create("Range", as_nodes(start, stop, step)) @unary_op def relu(node: NodeInput, name: Optional[str] = None) -> Node: - """Perform rectified linear unit operation on input node element-wise. + """! Perform rectified linear unit operation on input node element-wise. - :param node: One of: input node, array or scalar. - :param name: The optional output node name. - :return: The new node performing relu operation on its input element-wise. + @param node: One of: input node, array or scalar. + @param name: The optional output node name. + @return The new node performing relu operation on its input element-wise. """ return _get_node_factory_opset1().create("Relu", [node]) @@ -2268,13 +2268,13 @@ def relu(node: NodeInput, name: Optional[str] = None) -> Node: def reduce_logical_and( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Logical AND reduction operation on input tensor, eliminating the specified reduction axes. + """! Logical AND reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to reduce. - :param reduction_axes: The axes to eliminate through AND operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing reduction operation. + @param node: The tensor we want to reduce. + @param reduction_axes: The axes to eliminate through AND operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing reduction operation. 
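+
+    A minimal usage sketch (illustrative only; assumes this module is imported
+    as `ng` and NumPy as `np`; the boolean constant below is hypothetical data):
+    ~~~~~~~~~~~~~{.py}
+    mask = ng.constant(np.array([[True, False], [True, True]]))
+    # AND along axis 0: [True and True, False and True] -> [True, False]
+    reduced = ng.reduce_logical_and(mask, np.array([0], dtype=np.int64))
+    ~~~~~~~~~~~~~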
""" return _get_node_factory_opset1().create( "ReduceLogicalAnd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2285,13 +2285,13 @@ def reduce_logical_and( def reduce_logical_or( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Logical OR reduction operation on input tensor, eliminating the specified reduction axes. + """! Logical OR reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to reduce. - :param reduction_axes: The axes to eliminate through OR operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing reduction operation. + @param node: The tensor we want to reduce. + @param reduction_axes: The axes to eliminate through OR operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing reduction operation. """ return _get_node_factory_opset1().create( "ReduceLogicalOr", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2302,12 +2302,12 @@ def reduce_logical_or( def reduce_max( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Max-reduction operation on input tensor, eliminating the specified reduction axes. + """! Max-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to max-reduce. - :param reduction_axes: The axes to eliminate through max operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. + @param node: The tensor we want to max-reduce. + @param reduction_axes: The axes to eliminate through max operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. """ return _get_node_factory_opset1().create( "ReduceMax", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2318,13 +2318,13 @@ def reduce_max( def reduce_mean( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Mean-reduction operation on input tensor, eliminating the specified reduction axes. + """! Mean-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. + @param node: The tensor we want to mean-reduce. + @param reduction_axes: The axes to eliminate through mean operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing mean-reduction operation. """ return _get_node_factory_opset1().create( "ReduceMean", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2335,12 +2335,12 @@ def reduce_mean( def reduce_min( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Min-reduction operation on input tensor, eliminating the specified reduction axes. + """! 
Min-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to min-reduce. - :param reduction_axes: The axes to eliminate through min operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. + @param node: The tensor we want to min-reduce. + @param reduction_axes: The axes to eliminate through min operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. """ return _get_node_factory_opset1().create( "ReduceMin", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2351,13 +2351,13 @@ def reduce_min( def reduce_prod( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Product-reduction operation on input tensor, eliminating the specified reduction axes. + """! Product-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to product-reduce. - :param reduction_axes: The axes to eliminate through product operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing product-reduction operation. + @param node: The tensor we want to product-reduce. + @param reduction_axes: The axes to eliminate through product operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing product-reduction operation. """ return _get_node_factory_opset1().create( "ReduceProd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2368,13 +2368,13 @@ def reduce_prod( def reduce_sum( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """Perform element-wise sums of the input tensor, eliminating the specified reduction axes. + """! Perform element-wise sums of the input tensor, eliminating the specified reduction axes. - :param node: The node providing data for operation. - :param reduction_axes: The axes to eliminate through summation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: The optional new name for output node. - :return: The new node performing summation along `reduction_axes` element-wise. + @param node: The node providing data for operation. + @param reduction_axes: The axes to eliminate through summation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: The optional new name for output node. + @return The new node performing summation along `reduction_axes` element-wise. """ return _get_node_factory_opset1().create( "ReduceSum", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -2394,19 +2394,19 @@ def region_yolo( anchors: List[float] = None, name: Optional[str] = None, ) -> Node: - """Return a node which produces the RegionYolo operation. + """! Return a node which produces the RegionYolo operation. 
- :param input: Input data - :param coords: Number of coordinates for each region - :param classes: Number of classes for each region - :param num: Number of regions - :param do_softmax: Compute softmax - :param mask: Mask - :param axis: Axis to begin softmax on - :param end_axis: Axis to end softmax on - :param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes - :param name: Optional name for output node. - :return: RegionYolo node + @param input: Input data + @param coords: Number of coordinates for each region + @param classes: Number of classes for each region + @param num: Number of regions + @param do_softmax: Compute softmax + @param mask: Mask + @param axis: Axis to begin softmax on + @param end_axis: Axis to end softmax on + @param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes + @param name: Optional name for output node. + @return RegionYolo node """ if anchors is None: anchors = [] @@ -2431,11 +2431,11 @@ def region_yolo( def reshape( node: NodeInput, output_shape: NodeInput, special_zero: bool, name: Optional[str] = None ) -> Node: - """Return reshaped node according to provided parameters. + """! Return reshaped node according to provided parameters. - :param node: The tensor we want to reshape. - :param output_shape: The node with a new shape for input tensor. - :param special_zero: The boolean variable that controls how zero values in shape are + @param node: The tensor we want to reshape. + @param output_shape: The node with a new shape for input tensor. + @param special_zero: The boolean variable that controls how zero values in shape are interpreted. If special_zero is false, then 0 is interpreted as-is which means that output shape will contain a zero dimension at the specified location. Input and output tensors are empty in this case. @@ -2450,10 +2450,10 @@ def reshape( @unary_op def result(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which represents an output of a graph (Function). + """! Return a node which represents an output of a graph (Function). - :param data: The tensor containing the input data - :return: Result node + @param data: The tensor containing the input data + @return Result node """ return _get_node_factory_opset1().create("Result", [data]) @@ -2466,13 +2466,13 @@ def reverse_sequence( seq_axis: NumericData, name: Optional[str] = None, ) -> Node: - """Return a node which produces a ReverseSequence operation. + """! Return a node which produces a ReverseSequence operation. - :param input: tensor with input data to reverse - :param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor. - :param batch_axis: index of the batch dimension. - :param seq_axis: index of the sequence dimension. - :return: ReverseSequence node + @param input: tensor with input data to reverse + @param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor. + @param batch_axis: index of the batch dimension. + @param seq_axis: index of the sequence dimension. + @return ReverseSequence node """ return _get_node_factory_opset1().create( "ReverseSequence", @@ -2489,16 +2489,16 @@ def select( auto_broadcast: str = "numpy", name: Optional[str] = None, ) -> Node: - """Perform an element-wise selection operation on input tensors. + """! Perform an element-wise selection operation on input tensors. - :param cond: Tensor with selection mask of type `boolean`. 
- :param then_node: Tensor providing data to be selected if respective `cond` + @param cond: Tensor with selection mask of type `boolean`. + @param then_node: Tensor providing data to be selected if respective `cond` item value is `True`. - :param else_node: Tensor providing data to be selected if respective `cond` + @param else_node: Tensor providing data to be selected if respective `cond` item value is `False`. - :param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The new node with values selected according to provided arguments. + @param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors. + @param name: The optional new name for output node. + @return The new node with values selected according to provided arguments. """ inputs = as_nodes(cond, then_node, else_node) return _get_node_factory_opset1().create( @@ -2512,44 +2512,44 @@ def select( def selu( data: NodeInput, alpha: NodeInput, lambda_value: NodeInput, name: Optional[str] = None ) -> Node: - """Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise. + """! Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise. - :param data: input node, array or scalar. - :param alpha: Alpha coefficient of SELU operation - :param lambda_value: Lambda coefficient of SELU operation - :param name: The optional output node name. - :return: The new node performing relu operation on its input element-wise. + @param data: input node, array or scalar. + @param alpha: Alpha coefficient of SELU operation + @param lambda_value: Lambda coefficient of SELU operation + @param name: The optional output node name. + @return The new node performing relu operation on its input element-wise. """ return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value)) @nameable_op def shape_of(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces a tensor containing the shape of its input data. + """! Return a node which produces a tensor containing the shape of its input data. - :param data: The tensor containing the input data. - :return: ShapeOf node + @param data: The tensor containing the input data. + @return ShapeOf node """ return _get_node_factory_opset1().create("ShapeOf", [as_node(data)]) @unary_op def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which applies the sigmoid function element-wise. + """! Return a node which applies the sigmoid function element-wise. - :param data: The tensor containing the input data - :return: Sigmoid node + @param data: The tensor containing the input data + @return Sigmoid node """ return _get_node_factory_opset1().create("Sigmoid", [data]) @unary_op def sign(node: NodeInput, name: Optional[str] = None) -> Node: - """Perform element-wise sign operation. + """! Perform element-wise sign operation. - :param node: One of: input node, array or scalar. - :param name: The optional new name for output node. - :return: The node with mapped elements of the input tensor to -1 (if it is negative), + @param node: One of: input node, array or scalar. + @param name: The optional new name for output node. + @return The node with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive). 
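+
+    A minimal usage sketch (illustrative only; assumes this module is imported
+    as `ng` and NumPy as `np`):
+    ~~~~~~~~~~~~~{.py}
+    values = np.array([-2.0, 0.0, 3.5], dtype=np.float32)
+    node = ng.sign(values)   # evaluates to [-1.0, 0.0, 1.0]
+    ~~~~~~~~~~~~~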
""" return _get_node_factory_opset1().create("Sign", [node]) @@ -2557,54 +2557,54 @@ def sign(node: NodeInput, name: Optional[str] = None) -> Node: @unary_op def sin(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply sine function on the input node element-wise. + """! Apply sine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with sin operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with sin operation applied on it. """ return _get_node_factory_opset1().create("Sin", [node]) @unary_op def sinh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic sine function on the input node element-wise. + """! Apply hyperbolic sine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with sin operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with sin operation applied on it. """ return _get_node_factory_opset1().create("Sinh", [node]) @nameable_op def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: - """Apply softmax operation on each element of input tensor. + """! Apply softmax operation on each element of input tensor. - :param data: The tensor providing input data. - :param axis: An axis along which Softmax should be calculated - :return: The new node with softmax operation applied on each element. + @param data: The tensor providing input data. + @param axis: An axis along which Softmax should be calculated + @return The new node with softmax operation applied on each element. """ return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis}) @nameable_op def space_to_depth(data: Node, mode: str, block_size: int = 1, name: str = None) -> Node: - """Perform SpaceToDepth operation on the input tensor. + """! Perform SpaceToDepth operation on the input tensor. SpaceToDepth rearranges blocks of spatial data into depth. The operator returns a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. - :param data: The node with data tensor. - :param mode: Specifies how the output depth dimension is gathered from block coordinates. + @param data: The node with data tensor. + @param mode: Specifies how the output depth dimension is gathered from block coordinates. blocks_first: The output depth is gathered from [block_size, ..., block_size, C] depth_first: The output depth is gathered from [C, block_size, ..., block_size] - :param block_size: The size of the block of values to be moved. Scalar value. - :param name: Optional output node name. - :return: The new node performing a SpaceToDepth operation on input tensor. + @param block_size: The size of the block of values to be moved. Scalar value. + @param name: Optional output node name. + @return The new node performing a SpaceToDepth operation on input tensor. 
""" return _get_node_factory_opset1().create( "SpaceToDepth", [data], {"mode": mode, "block_size": block_size}, @@ -2613,12 +2613,12 @@ def space_to_depth(data: Node, mode: str, block_size: int = 1, name: str = None) @nameable_op def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] = None) -> Node: - """Return a node which splits the input tensor into same-length slices. + """! Return a node which splits the input tensor into same-length slices. - :param data: The input tensor to be split - :param axis: Axis along which the input data will be split - :param num_splits: Number of the output tensors that should be produced - :return: Split node + @param data: The input tensor to be split + @param axis: Axis along which the input data will be split + @param num_splits: Number of the output tensors that should be produced + @return Split node """ return _get_node_factory_opset1().create( "Split", @@ -2629,11 +2629,11 @@ def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] @unary_op def sqrt(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies square root to the input node element-wise. + """! Return node which applies square root to the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: The new node with sqrt operation applied element-wise. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return The new node with sqrt operation applied element-wise. """ return _get_node_factory_opset1().create("Sqrt", [node]) @@ -2642,16 +2642,16 @@ def sqrt(node: NodeInput, name: Optional[str] = None) -> Node: def squared_difference( x1: NodeInput, x2: NodeInput, auto_broadcast: str = "NUMPY", name: Optional[str] = None ) -> Node: - """Perform an element-wise squared difference between two tensors. + """! Perform an element-wise squared difference between two tensors. - .. math:: y[i] = (x_1[i] - x_2[i])^2 + \f[ y[i] = (x_1[i] - x_2[i])^2 \f] - :param x1: The node with first input tensor. - :param x2: The node with second input tensor. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + @param x1: The node with first input tensor. + @param x2: The node with second input tensor. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: numpy, explicit. - :param name: Optional new name for output node. - :return: The new node performing a squared difference between two tensors. + @param name: Optional new name for output node. + @return The new node performing a squared difference between two tensors. """ return _get_node_factory_opset1().create( "SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()} @@ -2660,12 +2660,12 @@ def squared_difference( @nameable_op def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: - """Perform squeeze operation on input tensor. + """! Perform squeeze operation on input tensor. Remove single-dimensional entries from the shape of a tensor. - Takes a parameter :code:`axes` with a list of axes to squeeze. - If :code:`axes` is not provided, all the single dimensions will be removed from the shape. - If an :code:`axis` is selected with shape entry not equal to one, an error is raised. + Takes a parameter `axes` with a list of axes to squeeze. 
+ If `axes` is not provided, all the single dimensions will be removed from the shape. + If an `axis` is selected with shape entry not equal to one, an error is raised. For example: @@ -2674,11 +2674,11 @@ def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Nod Result: tensor with shape [1, 2, 3, 1] - :param data: The node with data tensor. - :param axes: List of non-negative integers, indicate the dimensions to squeeze. + @param data: The node with data tensor. + @param axes: List of non-negative integers, indicate the dimensions to squeeze. One of: input node or array. - :param name: Optional new name for output node. - :return: The new node performing a squeeze operation on input tensor. + @param name: Optional new name for output node. + @return The new node performing a squeeze operation on input tensor. """ return _get_node_factory_opset1().create("Squeeze", as_nodes(data, axes)) @@ -2696,20 +2696,20 @@ def strided_slice( ellipsis_mask: Optional[List[int]] = None, name: Optional[str] = None, ) -> Node: - """Return a node which dynamically repeats(replicates) the input data tensor. + """! Return a node which dynamically repeats(replicates) the input data tensor. - :param data: The tensor to be sliced - :param begin: 1D tensor with begin indexes for input blob slicing - :param end: 1D tensor with end indexes for input blob slicing - :param strides: The slicing strides - :param begin_mask: A mask applied to the 'begin' input indicating which elements + @param data: The tensor to be sliced + @param begin: 1D tensor with begin indexes for input blob slicing + @param end: 1D tensor with end indexes for input blob slicing + @param strides: The slicing strides + @param begin_mask: A mask applied to the 'begin' input indicating which elements shoud be ignored - :param end_mask: A mask applied to the 'end' input indicating which elements + @param end_mask: A mask applied to the 'end' input indicating which elements shoud be ignored - :param new_axis_mask: A mask indicating dimensions where '1' should be inserted - :param shrink_axis_mask: A mask indicating which dimensions should be deleted - :param ellipsis_mask: Indicates positions where missing dimensions should be inserted - :returns: StridedSlice node + @param new_axis_mask: A mask indicating dimensions where '1' should be inserted + @param shrink_axis_mask: A mask indicating which dimensions should be deleted + @param ellipsis_mask: Indicates positions where missing dimensions should be inserted + @return StridedSlice node """ if new_axis_mask is None: new_axis_mask = [] @@ -2737,14 +2737,14 @@ def subtract( auto_broadcast: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Return node which applies f(x) = A-B to the input nodes element-wise. + """! Return node which applies f(x) = A-B to the input nodes element-wise. - :param left_node: The node providing data for left hand side of operator. - :param right_node: The node providing data for right hand side of operator. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + @param left_node: The node providing data for left hand side of operator. + @param right_node: The node providing data for right hand side of operator. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: numpy, explicit. - :param name: The optional name for output node. - :return: The new output node performing subtraction operation on both tensors element-wise. 
+ @param name: The optional name for output node. + @return The new output node performing subtraction operation on both tensors element-wise. """ return _get_node_factory_opset1().create( "Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} @@ -2753,22 +2753,22 @@ def subtract( @unary_op def tan(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply tangent function on the input node element-wise. + """! Apply tangent function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with tan operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with tan operation applied on it. """ return _get_node_factory_opset1().create("Tan", [node]) @unary_op def tanh(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies hyperbolic tangent to the input node element-wise. + """! Return node which applies hyperbolic tangent to the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with tanh operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with tanh operation applied on it. """ return _get_node_factory_opset1().create("Tanh", [node]) @@ -2787,23 +2787,23 @@ def tensor_iterator( """ Perform recurrent execution of the network described in the body, iterating through the data. - :param inputs: The provided to TensorIterator operator. - :param graph_body: The graph representing the body we execute. - :param slice_input_desc: The descriptors describing sliced inputs, that is nodes + @param inputs: The provided to TensorIterator operator. + @param graph_body: The graph representing the body we execute. + @param slice_input_desc: The descriptors describing sliced inputs, that is nodes representing tensors we iterate through, processing single data slice in one iteration. - :param merged_input_desc: The descriptors describing merged inputs, that is nodes + @param merged_input_desc: The descriptors describing merged inputs, that is nodes representing variables with initial value at first iteration, which may be changing through iterations. - :param invariant_input_desc: The descriptors describing invariant inputs, that is nodes + @param invariant_input_desc: The descriptors describing invariant inputs, that is nodes representing variable with persistent value through all iterations. - :param body_output_desc: The descriptors describing body outputs from specified + @param body_output_desc: The descriptors describing body outputs from specified iteration. - :param concat_output_desc: The descriptors describing specified output values through + @param concat_output_desc: The descriptors describing specified output values through all the iterations concatenated into one node. - :param name: The optional name for output node. - :returns: Node representing TensorIterator operation. + @param name: The optional name for output node. + @return Node representing TensorIterator operation. """ attributes = { "body": graph_body.serialize(), @@ -2819,11 +2819,11 @@ def tensor_iterator( @nameable_op def tile(data: NodeInput, repeats: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which dynamically repeats(replicates) the input data tensor. + """! 
Return a node which dynamically repeats(replicates) the input data tensor. - :param data: The input tensor to be tiled - :param repeats: Per-dimension replication factors - :return: Tile node + @param data: The input tensor to be tiled + @param repeats: Per-dimension replication factors + @return Tile node """ return _get_node_factory_opset1().create("Tile", as_nodes(data, repeats)) @@ -2837,14 +2837,14 @@ def topk( sort: str, name: Optional[str] = None, ) -> Node: - """Return a node which performs TopK. + """! Return a node which performs TopK. - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :return: The new node which performs TopK (both indices and values) + @param data: Input data. + @param k: K. + @param axis: TopK Axis. + @param mode: Compute TopK largest ('max') or smallest ('min') + @param sort: Order of output elements (sort by: 'none', 'index' or 'value') + @return The new node which performs TopK (both indices and values) """ return _get_node_factory_opset1().create( "TopK", @@ -2855,17 +2855,17 @@ def topk( @nameable_op def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which transposes the data in the input tensor. + """! Return a node which transposes the data in the input tensor. - :param data: The input tensor to be transposed - :param input_order: Permutation of axes to be applied to the input tensor - :return: Transpose node + @param data: The input tensor to be transposed + @param input_order: Permutation of axes to be applied to the input tensor + @return Transpose node """ return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order)) def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: - """Perform unsqueeze operation on input tensor. + """! Perform unsqueeze operation on input tensor. Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes, a list of dimensions that will be inserted. @@ -2874,10 +2874,10 @@ def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> N For example: Inputs: tensor with shape [3, 4, 5], axes=[0, 4] Result: tensor with shape [1, 3, 4, 5, 1] - :param data: The node with data tensor. - :param axes: List of non-negative integers, indicate the dimensions to be inserted. + @param data: The node with data tensor. + @param axes: List of non-negative integers, indicate the dimensions to be inserted. One of: input node or array. - :return: The new node performing an unsqueeze operation on input tensor. + @return The new node performing an unsqueeze operation on input tensor. """ return _get_node_factory_opset1().create("Unsqueeze", as_nodes(data, axes)) @@ -2886,11 +2886,11 @@ def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> N def variadic_split( data: NodeInput, axis: NodeInput, split_lengths: NodeInput, name: Optional[str] = None ) -> Node: - """Return a node which splits the input tensor into variadic length slices. + """! Return a node which splits the input tensor into variadic length slices. 
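To illustrate `unsqueeze` and `transpose` as documented above, a short sketch under the same assumptions (top-level `ng` alias, hypothetical parameter node and shapes):

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([3, 4, 5], name="x", dtype=np.float32)

# Insert singleton dimensions at positions 0 and 4: result shape [1, 3, 4, 5, 1].
expanded = ng.unsqueeze(x, [0, 4])

# Reverse the axis order of the original input: result shape [5, 4, 3].
transposed = ng.transpose(x, [2, 1, 0])
~~~~~~~~~~~~~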
- :param data: The input tensor to be split - :param axis: Axis along which the input data will be split - :param split_lengths: Sizes of the output tensors along the split axis - :return: VariadicSplit node + @param data: The input tensor to be split + @param axis: Axis along which the input data will be split + @param split_lengths: Sizes of the output tensors along the split axis + @return VariadicSplit node """ return _get_node_factory_opset1().create("VariadicSplit", as_nodes(data, axis, split_lengths)) diff --git a/ngraph/python/src/ngraph/opset2/ops.py b/ngraph/python/src/ngraph/opset2/ops.py index 2d01aa3295e688..ec49c9113b98be 100644 --- a/ngraph/python/src/ngraph/opset2/ops.py +++ b/ngraph/python/src/ngraph/opset2/ops.py @@ -14,7 +14,7 @@ # limitations under the License. # ****************************************************************************** -"""Factory functions for all ngraph ops.""" +"""! Factory functions for all ngraph ops.""" from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np @@ -66,16 +66,16 @@ def batch_to_space( crops_end: NodeInput, name: Optional[str] = None, ) -> Node: - """Perform BatchToSpace operation on the input tensor. + """! Perform BatchToSpace operation on the input tensor. BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions. - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. - :param crops_end: Specifies the amount to crop from the end along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a BatchToSpace operation. + @param data: Node producing the data tensor. + @param block_shape: The sizes of the block of values to be moved. + @param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. + @param crops_end: Specifies the amount to crop from the end along each axis of `data`. + @param name: Optional output node name. + @return The new node performing a BatchToSpace operation. """ return _get_node_factory_opset2().create( "BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end) @@ -84,18 +84,18 @@ def batch_to_space( @unary_op def gelu(node: NodeInput, name: Optional[str] = None) -> Node: - r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node. + r"""! Perform Gaussian Error Linear Unit operation element-wise on data from input node. Computes GELU function: - .. math:: f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) + \f[ f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) \f] For more information refer to: `Gaussian Error Linear Unit (GELU) `_ - :param node: Input tensor. One of: input node, array or scalar. - :param name: Optional output node name. - :return: The new node performing a GELU operation on its input data element-wise. + @param node: Input tensor. One of: input node, array or scalar. + @param name: Optional output node name. + @return The new node performing a GELU operation on its input data element-wise. """ return _get_node_factory_opset2().create("Gelu", [node]) @@ -108,19 +108,19 @@ def mvn( eps: float = 1e-9, name: str = None, ) -> Node: - r"""Perform Mean Variance Normalization operation on data from input node. + r"""! Perform Mean Variance Normalization operation on data from input node. 
- Computes MVN on the input tensor :code:`data` (called `X`) using formula: + Computes MVN on the input tensor `data` (called `X`) using formula: - .. math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} + \f[ Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} \f] - :param data: The node with data tensor. - :param across_channels: Denotes if mean values are shared across channels. - :param normalize_variance: Denotes whether to perform variance normalization. - :param eps: The number added to the variance to avoid division by zero + @param data: The node with data tensor. + @param across_channels: Denotes if mean values are shared across channels. + @param normalize_variance: Denotes whether to perform variance normalization. + @param eps: The number added to the variance to avoid division by zero when normalizing the value. Scalar value. - :param name: Optional output node name. - :return: The new node performing a MVN operation on input tensor. + @param name: Optional output node name. + @return The new node performing a MVN operation on input tensor. """ return _get_node_factory_opset2().create( "MVN", @@ -131,12 +131,12 @@ def mvn( @nameable_op def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node: - """Return a node which produces the ReorgYolo operation. + """! Return a node which produces the ReorgYolo operation. - :param input: Input data - :param stride: Stride to reorganize input by - :param name: Optional name for output node. - :return: ReorgYolo node + @param input: Input data + @param stride: Stride to reorganize input by + @param name: Optional name for output node. + @return ReorgYolo node """ return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride}) @@ -150,14 +150,14 @@ def roi_pooling( method: str, name: Optional[str] = None, ) -> Node: - """Return a node which produces an ROIPooling operation. - - :param input: Input feature map {N, C, ...} - :param coords: Coordinates of bounding boxes - :param output_size: Height/Width of ROI output features (shape) - :param spatial_scale: Ratio of input feature map over input image size (float) - :param method: Method of pooling - string: "max" or "bilinear" - :return: ROIPooling node + """! Return a node which produces an ROIPooling operation. + + @param input: Input feature map {N, C, ...} + @param coords: Coordinates of bounding boxes + @param output_size: Height/Width of ROI output features (shape) + @param spatial_scale: Ratio of input feature map over input image size (float) + @param method: Method of pooling - string: "max" or "bilinear" + @return ROIPooling node """ method = method.lower() return _get_node_factory_opset2().create( @@ -175,18 +175,18 @@ def space_to_batch( pads_end: NodeInput, name: Optional[str] = None, ) -> Node: - """Perform SpaceToBatch operation on the input tensor. + """! Perform SpaceToBatch operation on the input tensor. SpaceToBatch permutes data tensor blocks of spatial data into batch dimension. The operator returns a copy of the input tensor where values from spatial blocks dimensions are moved in the batch dimension - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param pads_begin: Specifies the padding for the beginning along each axis of `data`. - :param pads_end: Specifies the padding for the ending along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a SpaceToBatch operation. + @param data: Node producing the data tensor. 
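A short sketch of the opset2 `gelu` and `mvn` calls documented above; the tensor shape and the `ng` alias are assumptions made for the example:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([2, 8, 32, 32], name="x", dtype=np.float32)

# Element-wise Gaussian Error Linear Unit activation.
activated = ng.gelu(x)

# Mean-variance normalization computed per channel (means not shared across channels).
normalized = ng.mvn(x, across_channels=False, normalize_variance=True)
~~~~~~~~~~~~~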
+ @param block_shape: The sizes of the block of values to be moved. + @param pads_begin: Specifies the padding for the beginning along each axis of `data`. + @param pads_end: Specifies the padding for the ending along each axis of `data`. + @param name: Optional output node name. + @return The new node performing a SpaceToBatch operation. """ return _get_node_factory_opset2().create( "SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end) diff --git a/ngraph/python/src/ngraph/opset3/ops.py b/ngraph/python/src/ngraph/opset3/ops.py index 1621a3320a11a2..d58241438a85c0 100644 --- a/ngraph/python/src/ngraph/opset3/ops.py +++ b/ngraph/python/src/ngraph/opset3/ops.py @@ -14,7 +14,7 @@ # limitations under the License. # ****************************************************************************** -"""Factory functions for all ngraph ops.""" +"""! Factory functions for all ngraph ops.""" from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np @@ -60,12 +60,12 @@ @nameable_op def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. + """! Return a node which produces the Assign operation. - :param new_value: Node producing a value to be assigned to a variable. - :param variable_id: Id of a variable to be updated. - :param name: Optional name for output node. - :return: Assign node + @param new_value: Node producing a value to be assigned to a variable. + @param variable_id: Id of a variable to be updated. + @param name: Optional name for output node. + @return Assign node """ return _get_node_factory_opset3().create( "Assign", @@ -82,16 +82,16 @@ def broadcast( broadcast_spec: str = "NUMPY", name: Optional[str] = None, ) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. + """! Create a node which broadcasts the input node's values along specified axes to a desired shape. - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result + @param data: The node with input tensor data. + @param target_shape: The node with a new shape we want to broadcast tensor to. + @param axes_mapping: The node with a axis positions (0-based) in the result that are being broadcast. - :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes + @param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL. - :param name: Optional new name for output node. - :return: New node with broadcast shape. + @param name: Optional new name for output node. + @return New node with broadcast shape. """ inputs = as_nodes(data, target_shape) if broadcast_spec.upper() == "EXPLICIT": @@ -109,15 +109,15 @@ def bucketize( with_right_bound: bool = True, name: Optional[str] = None, ) -> Node: - """Return a node which produces the Bucketize operation. + """! Return a node which produces the Bucketize operation. 
- :param data: Input data to bucketize - :param buckets: 1-D of sorted unique boundaries for buckets - :param output_type: Output tensor type, "i64" or "i32", defaults to i64 - :param with_right_bound: indicates whether bucket includes the right or left + @param data: Input data to bucketize + @param buckets: 1-D of sorted unique boundaries for buckets + @param output_type: Output tensor type, "i64" or "i32", defaults to i64 + @param with_right_bound: indicates whether bucket includes the right or left edge of interval. default true = includes right edge - :param name: Optional name for output node. - :return: Bucketize node + @param name: Optional name for output node. + @return Bucketize node """ return _get_node_factory_opset3().create( "Bucketize", @@ -134,13 +134,13 @@ def cum_sum( reverse: bool = False, name: Optional[str] = None, ) -> Node: - """Construct a cumulative summation operation. + """! Construct a cumulative summation operation. - :param arg: The tensor to be summed. - :param axis: zero dimension tensor specifying axis position along which sum will be performed. - :param exclusive: if set to true, the top element is not included - :param reverse: if set to true, will perform the sums in reverse direction - :return: New node performing the operation + @param arg: The tensor to be summed. + @param axis: zero dimension tensor specifying axis position along which sum will be performed. + @param exclusive: if set to true, the top element is not included + @param reverse: if set to true, will perform the sums in reverse direction + @return New node performing the operation """ return _get_node_factory_opset3().create( "CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse} @@ -156,15 +156,15 @@ def embedding_bag_offsets_sum( per_sample_weights: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return a node which performs sums of bags of embeddings without the intermediate embeddings. - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param offsets: Tensor containing the starting index positions of each bag in indices. - :param per_sample_weights: Tensor with weights for each sample. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param name: Optional name for output node. - :return: The new node which performs EmbeddingBagOffsetsSum + """! Return a node which performs sums of bags of embeddings without the intermediate embeddings. + + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param offsets: Tensor containing the starting index positions of each bag in indices. + @param per_sample_weights: Tensor with weights for each sample. + @param default_index: Scalar containing default index in embedding table to fill empty bags. + @param name: Optional name for output node. + @return The new node which performs EmbeddingBagOffsetsSum """ inputs = [emb_table, as_node(indices), as_node(offsets)] if per_sample_weights is not None: @@ -183,16 +183,16 @@ def embedding_bag_packed_sum( per_sample_weights: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return an EmbeddingBagPackedSum node. + """! Return an EmbeddingBagPackedSum node. EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given input tensor with a row (from the weights matrix) at that index - :param emb_table: Tensor containing the embedding lookup table. 
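The following sketch shows `cum_sum` and `bucketize` from the documentation above; the `ng.constant` helper, the boundary values, and the shapes are assumptions chosen for illustration:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([3, 4], name="x", dtype=np.float32)

# Running sums along the last axis (axis index passed as a scalar).
sums = ng.cum_sum(x, 1)

# Map every element to the index of the bucket defined by the sorted boundaries.
boundaries = ng.constant(np.array([0.0, 1.0, 2.0], dtype=np.float32))
buckets = ng.bucketize(x, boundaries)
~~~~~~~~~~~~~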
- :param indices: Tensor with indices. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node. - :return: EmbeddingBagPackedSum node + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param per_sample_weights: Weights to be multiplied with embedding table. + @param name: Optional name for output node. + @return EmbeddingBagPackedSum node """ inputs = [as_node(emb_table), as_node(indices)] if per_sample_weights is not None: @@ -211,19 +211,19 @@ def embedding_segments_sum( per_sample_weights: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return an EmbeddingSegmentsSum node. + """! Return an EmbeddingSegmentsSum node. EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given input tensor with a row (from the weights matrix) at that index - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param segment_ids: Tensor with indices into the output Tensor - :param num_segments: Tensor with number of segments. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node. - :return: EmbeddingSegmentsSum node + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param segment_ids: Tensor with indices into the output Tensor + @param num_segments: Tensor with number of segments. + @param default_index: Scalar containing default index in embedding table to fill empty bags. + @param per_sample_weights: Weights to be multiplied with embedding table. + @param name: Optional name for output node. + @return EmbeddingSegmentsSum node """ inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)] if per_sample_weights is not None: @@ -248,15 +248,15 @@ def extract_image_patches( auto_pad: str, name: Optional[str] = None, ) -> Node: - """Return a node which produces the ExtractImagePatches operation. - - :param image: 4-D Input data to extract image patches. - :param sizes: Patch size in the format of [size_rows, size_cols]. - :param strides: Patch movement stride in the format of [stride_rows, stride_cols] - :param rates: Element seleciton rate for creating a patch. - :param auto_pad: Padding type. - :param name: Optional name for output node. - :return: ExtractImagePatches node + """! Return a node which produces the ExtractImagePatches operation. + + @param image: 4-D Input data to extract image patches. + @param sizes: Patch size in the format of [size_rows, size_cols]. + @param strides: Patch movement stride in the format of [stride_rows, stride_cols] + @param rates: Element seleciton rate for creating a patch. + @param auto_pad: Padding type. + @param name: Optional name for output node. + @return ExtractImagePatches node """ return _get_node_factory_opset3().create( "ExtractImagePatches", @@ -280,36 +280,36 @@ def gru_cell( linear_before_reset: bool = False, name: Optional[str] = None, ) -> Node: - """Perform GRUCell operation on the tensor from input node. + """! Perform GRUCell operation on the tensor from input node. GRUCell represents a single GRU Cell that computes the output using the formula described in the paper: https://arxiv.org/abs/1406.1078 Note this class represents only single *cell* and not whole *layer*. 
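To make the GRUCell shape conventions spelled out in the parameter list below easier to follow, here is a minimal sketch with assumed sizes (batch 2, input 16, hidden 32); all tensors are hypothetical parameter nodes:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

batch_size, input_size, hidden_size = 2, 16, 32

X = ng.parameter([batch_size, input_size], name="X", dtype=np.float32)
H_t = ng.parameter([batch_size, hidden_size], name="H_t", dtype=np.float32)
W = ng.parameter([3 * hidden_size, input_size], name="W", dtype=np.float32)
R = ng.parameter([3 * hidden_size, hidden_size], name="R", dtype=np.float32)
B = ng.parameter([3 * hidden_size], name="B", dtype=np.float32)

# One GRU cell step; the output hidden state has shape [batch_size, hidden_size].
H_next = ng.gru_cell(X, H_t, W, R, B, hidden_size)
~~~~~~~~~~~~~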
- :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor at current time step with shape: + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor at current time step with shape: [batch_size, hidden_size]. - :param W: The weights for matrix multiplication, gate order: zrh. + @param W: The weights for matrix multiplication, gate order: zrh. Shape: [3*hidden_size, input_size]. - :param R: The recurrence weights for matrix multiplication. + @param R: The recurrence weights for matrix multiplication. Shape: [3*hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). + @param B: The sum of biases (weight and recurrence). For linear_before_reset set True the shape is [4*hidden_size]. Otherwise the shape is [3*hidden_size]. - :param hidden_size: The number of hidden units for recurrent cell. + @param hidden_size: The number of hidden units for recurrent cell. Specifies hidden state size. - :param activations: The vector of activation functions used inside recurrent cell. - :param activation_alpha: The vector of alpha parameters for activation functions in + @param activations: The vector of activation functions used inside recurrent cell. + @param activation_alpha: The vector of alpha parameters for activation functions in order respective to activation list. - :param activation_beta: The vector of beta parameters for activation functions in order + @param activation_beta: The vector of beta parameters for activation functions in order respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of + @param clip: The value defining clipping range [-clip, clip] on input of activation functions. - :param linear_before_reset: Flag denotes if the layer behaves according to the modification + @param linear_before_reset: Flag denotes if the layer behaves according to the modification of GRUCell described in the formula in the ONNX documentation. - :param name: Optional output node name. - :returns: The new node performing a GRUCell operation on tensor from input node. + @param name: Optional output node name. + @return The new node performing a GRUCell operation on tensor from input node. """ if activations is None: activations = ["relu", "sigmoid", "tanh"] @@ -342,19 +342,19 @@ def non_max_suppression( output_type: str = "i64", name: Optional[str] = None, ) -> Node: - """Return a node which performs NonMaxSuppression. + """! Return a node which performs NonMaxSuppression. - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param box_encoding: Format of boxes data encoding. 
+    @param sort_result_descending: Flag that specifies whether it is necessary to sort selected
                                    boxes across batches or not.
-    :param output_type: Output element type.
-    :return: The new node which performs NonMaxSuppression
+    @param output_type: Output element type.
+    @return The new node which performs NonMaxSuppression
     """
     if max_output_boxes_per_class is None:
         max_output_boxes_per_class = make_constant_node(0, np.int64)
@@ -375,12 +375,12 @@ def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = No


 @nameable_op
 def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None,) -> Node:
-    """Return the indices of the elements that are non-zero.
+    """! Return the indices of the elements that are non-zero.

-    :param data: Input data.
-    :param output_type: Output tensor type.
+    @param data: Input data.
+    @param output_type: Output tensor type.

-    :return: The new node which performs NonZero
+    @return The new node which performs NonZero
     """
     return _get_node_factory_opset3().create(
         "NonZero",
@@ -391,12 +391,12 @@ def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = No


 @nameable_op
 def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
-    """Return a node which produces the Assign operation.
+    """! Return a node which produces the ReadValue operation.

-    :param init_value: Node producing a value to be returned instead of an unassigned variable.
-    :param variable_id: Id of a variable to be read.
-    :param name: Optional name for output node.
-    :return: ReadValue node
+    @param init_value: Node producing a value to be returned instead of an unassigned variable.
+    @param variable_id: Id of a variable to be read.
+    @param name: Optional name for output node.
+    @return ReadValue node
     """
     return _get_node_factory_opset3().create(
         "ReadValue",
@@ -419,31 +419,31 @@ def rnn_cell(
     clip: float = 0.0,
     name: Optional[str] = None,
 ) -> Node:
-    """Perform RNNCell operation on tensor from input node.
+    """! Perform RNNCell operation on tensor from input node.

     It follows notation and equations defined as in ONNX standard:
     https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN

     Note this class represents only single *cell* and not whole RNN *layer*.

-    :param X: The input tensor with shape: [batch_size, input_size].
-    :param initial_hidden_state: The hidden state tensor at current time step with shape:
+    @param X: The input tensor with shape: [batch_size, input_size].
+    @param initial_hidden_state: The hidden state tensor at current time step with shape:
                                  [batch_size, hidden_size].
-    :param W: The weight tensor with shape: [hidden_size, input_size].
-    :param R: The recurrence weight tensor with shape: [hidden_size,
+    @param W: The weight tensor with shape: [hidden_size, input_size].
+    @param R: The recurrence weight tensor with shape: [hidden_size,
               hidden_size].
-    :param B: The bias tensor for input gate with shape: [2*hidden_size].
-    :param hidden_size: The number of hidden units for recurrent cell.
+    @param B: The bias tensor for input gate with shape: [2*hidden_size].
+    @param hidden_size: The number of hidden units for recurrent cell.
                         Specifies hidden state size.
-    :param activations: The vector of activation functions used inside recurrent cell.
-    :param activation_alpha: The vector of alpha parameters for activation functions in
+    @param activations: The vector of activation functions used inside recurrent cell.
+    @param activation_alpha: The vector of alpha parameters for activation functions in
                              order respective to activation list.
- :param activation_beta: The vector of beta parameters for activation functions in order + @param activation_beta: The vector of beta parameters for activation functions in order respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of + @param clip: The value defining clipping range [-clip, clip] on input of activation functions. - :param name: Optional output node name. - :returns: The new node performing a RNNCell operation on tensor from input node. + @param name: Optional output node name. + @return The new node performing a RNNCell operation on tensor from input node. """ if activations is None: activations = ["sigmoid", "tanh"] @@ -475,20 +475,20 @@ def roi_align( mode: str, name: Optional[str] = None, ) -> Node: - """Return a node which performs ROIAlign. + """! Return a node which performs ROIAlign. - :param data: Input data. - :param rois: RoIs (Regions of Interest) to pool over. - :param batch_indices: Tensor with each element denoting the index of + @param data: Input data. + @param rois: RoIs (Regions of Interest) to pool over. + @param batch_indices: Tensor with each element denoting the index of the corresponding image in the batch. - :param pooled_h: Height of the ROI output feature map. - :param pooled_w: Width of the ROI output feature map. - :param sampling_ratio: Number of bins over height and width to use to calculate + @param pooled_h: Height of the ROI output feature map. + @param pooled_w: Width of the ROI output feature map. + @param sampling_ratio: Number of bins over height and width to use to calculate each output feature map element. - :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. - :param mode: Method to perform pooling to produce output feature map elements. + @param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. + @param mode: Method to perform pooling to produce output feature map elements. - :return: The new node which performs ROIAlign + @return The new node which performs ROIAlign """ inputs = as_nodes(data, rois, batch_indices) attributes = { @@ -509,7 +509,7 @@ def scatter_elements_update( axis: NodeInput, name: Optional[str] = None, ) -> Node: - """Return a node which produces a ScatterElementsUpdate operation. + """! Return a node which produces a ScatterElementsUpdate operation. ScatterElementsUpdate creates a copy of the first input tensor with updated elements specified with second and third input tensors. @@ -521,11 +521,11 @@ def scatter_elements_update( corresponding entry in `indices` and the index-value for dimension not equal to `axis` is obtained from the index of the entry itself. - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis for scatter. - :return: ScatterElementsUpdate node + @param data: The input tensor to be updated. + @param indices: The tensor with indexes which will be updated. + @param updates: The tensor with update values. + @param axis: The axis for scatter. + @return ScatterElementsUpdate node """ return _get_node_factory_opset3().create( "ScatterElementsUpdate", as_nodes(data, indices, updates, axis) @@ -536,15 +536,15 @@ def scatter_elements_update( def scatter_update( data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None ) -> Node: - """Return a node which produces a ScatterUpdate operation. + """! 
Return a node which produces a ScatterUpdate operation. ScatterUpdate sets new values to slices from data addressed by indices. - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis at which elements will be updated. - :return: ScatterUpdate node + @param data: The input tensor to be updated. + @param indices: The tensor with indexes which will be updated. + @param updates: The tensor with update values. + @param axis: The axis at which elements will be updated. + @return ScatterUpdate node """ return _get_node_factory_opset3().create( "ScatterUpdate", @@ -554,11 +554,11 @@ def scatter_update( @nameable_op def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node: - """Return a node which produces a tensor containing the shape of its input data. + """! Return a node which produces a tensor containing the shape of its input data. - :param data: The tensor containing the input data. + @param data: The tensor containing the input data. :para output_type: Output element type. - :return: ShapeOf node + @return ShapeOf node """ return _get_node_factory_opset3().create( "ShapeOf", @@ -569,21 +569,20 @@ def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = No @nameable_op def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = None) -> Node: - """Perform permutation on data in the channel dimension of the input tensor. + """! Perform permutation on data in the channel dimension of the input tensor. The operation is the equivalent with the following transformation of the input tensor - :code:`data` of shape [N, C, H, W]: + `data` of shape [N, C, H, W]: - :code:`data_reshaped` = reshape(:code:`data`, [N, group, C / group, H * W]) + `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) - :code:`data_trnasposed` = transpose(:code:`data_reshaped`, [0, 2, 1, 3]) + `data_trnasposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) - :code:`output` = reshape(:code:`data_trnasposed`, [N, C, H, W]) + `output` = reshape(`data_trnasposed`, [N, C, H, W]) For example: - .. code-block:: python - + ~~~~~~~~~~~~~{.py} Inputs: tensor of shape [1, 6, 2, 2] data = [[[[ 0., 1.], [ 2., 3.]], @@ -604,15 +603,16 @@ def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = N [[ 4., 5.], [ 6., 7.]], [[12., 13.], [14., 15.]], [[20., 21.], [22., 23.]]]] + ~~~~~~~~~~~~~ - :param data: The node with input tensor. - :param axis: Channel dimension index in the data tensor. + @param data: The node with input tensor. + @param axis: Channel dimension index in the data tensor. A negative value means that the index should be calculated from the back of the input data shape. - :param group:The channel dimension specified by the axis parameter + @param group: The channel dimension specified by the axis parameter should be split into this number of groups. - :param name: Optional output node name. - :return: The new node performing a permutation on data in the channel dimension + @param name: Optional output node name. + @return The new node performing a permutation on data in the channel dimension of the input tensor. """ return _get_node_factory_opset3().create( @@ -630,15 +630,15 @@ def topk( index_element_type: str = "i32", name: Optional[str] = None, ) -> Node: - """Return a node which performs TopK. - - :param data: Input data. - :param k: K. - :param axis: TopK Axis. 
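As a quick illustration of `shape_of` and `shuffle_channels` documented above (reusing the same [1, 6, 2, 2] layout as the worked example), under the usual `ng` alias assumption:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([1, 6, 2, 2], name="x", dtype=np.float32)

# 1D i64 tensor holding the runtime shape of `x`.
shape = ng.shape_of(x)

# Permute the 6 channels in 3 groups along axis 1, as in the worked example above.
shuffled = ng.shuffle_channels(x, axis=1, groups=3)
~~~~~~~~~~~~~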
- :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :param index_element_type: Type of output tensor with indices. - :return: The new node which performs TopK (both indices and values) + """! Return a node which performs TopK. + + @param data: Input data. + @param k: K. + @param axis: TopK Axis. + @param mode: Compute TopK largest ('max') or smallest ('min') + @param sort: Order of output elements (sort by: 'none', 'index' or 'value') + @param index_element_type: Type of output tensor with indices. + @return The new node which performs TopK (both indices and values) """ return _get_node_factory_opset3().create( "TopK", diff --git a/ngraph/python/src/ngraph/opset4/ops.py b/ngraph/python/src/ngraph/opset4/ops.py index badc36095127b9..5e64f1ea547cdd 100644 --- a/ngraph/python/src/ngraph/opset4/ops.py +++ b/ngraph/python/src/ngraph/opset4/ops.py @@ -14,7 +14,7 @@ # limitations under the License. # ****************************************************************************** -"""Factory functions for all ngraph ops.""" +"""! Factory functions for all ngraph ops.""" from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np @@ -70,17 +70,17 @@ def ctc_loss( unique: bool = False, name: Optional[str] = None, ) -> Node: - """Return a node which performs CTCLoss. - - :param logits: 3-D tensor of logits. - :param logit_length: 1-D tensor of lengths for each object from a batch. - :param labels: 2-D tensor of labels for which likelihood is estimated using logits. - :param label_length: 1-D tensor of length for each label sequence. - :param blank_index: Scalar used to mark a blank index. - :param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. - :param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. - :param unique: Flag to find unique elements in a target. - :return: The new node which performs CTCLoss + """! Return a node which performs CTCLoss. + + @param logits: 3-D tensor of logits. + @param logit_length: 1-D tensor of lengths for each object from a batch. + @param labels: 2-D tensor of labels for which likelihood is estimated using logits. + @param label_length: 1-D tensor of length for each label sequence. + @param blank_index: Scalar used to mark a blank index. + @param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. + @param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. + @param unique: Flag to find unique elements in a target. + @return The new node which performs CTCLoss """ if blank_index is not None: inputs = as_nodes(logits, logit_length, labels, label_length, blank_index) @@ -108,19 +108,19 @@ def non_max_suppression( output_type: str = "i64", name: Optional[str] = None, ) -> Node: - """Return a node which performs NonMaxSuppression. + """! Return a node which performs NonMaxSuppression. - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. 
- :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param box_encoding: Format of boxes data encoding. + @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression + @param output_type: Output element type. + @return The new node which performs NonMaxSuppression """ if max_output_boxes_per_class is None: max_output_boxes_per_class = make_constant_node(0, np.int64) @@ -141,30 +141,30 @@ def non_max_suppression( @nameable_op def softplus(data: NodeInput, name: Optional[str] = None) -> Node: - """Apply SoftPlus operation on each element of input tensor. + """! Apply SoftPlus operation on each element of input tensor. - :param data: The tensor providing input data. - :return: The new node with SoftPlus operation applied on each element. + @param data: The tensor providing input data. + @return The new node with SoftPlus operation applied on each element. """ return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {}) @nameable_op def mish(data: NodeInput, name: Optional[str] = None,) -> Node: - """Return a node which performs Mish. + """! Return a node which performs Mish. - :param data: Tensor with input data floating point type. - :return: The new node which performs Mish + @param data: Tensor with input data floating point type. + @return The new node which performs Mish """ return _get_node_factory_opset4().create("Mish", as_nodes(data), {}) @nameable_op def hswish(data: NodeInput, name: Optional[str] = None,) -> Node: - """Return a node which performs HSwish (hard version of Swish). + """! Return a node which performs HSwish (hard version of Swish). - :param data: Tensor with input data floating point type. - :return: The new node which performs HSwish + @param data: Tensor with input data floating point type. + @return The new node which performs HSwish """ return _get_node_factory_opset4().create("HSwish", as_nodes(data), {}) @@ -175,10 +175,10 @@ def swish( beta: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: - """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)). + """! Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)). - :param data: Tensor with input data floating point type. - :return: The new node which performs Swish + @param data: Tensor with input data floating point type. + @return The new node which performs Swish """ if beta is None: beta = make_constant_node(1.0, np.float32) @@ -187,33 +187,33 @@ def swish( @nameable_op def acosh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse cosine function on the input node element-wise. + """! Apply hyperbolic inverse cosine function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccosh operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arccosh operation applied on it. 
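The opset4 element-wise activations documented above can be sketched in a few lines; the tensor shape and the `ng` alias are assumptions:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([2, 10], name="x", dtype=np.float32)

# Element-wise activations; each returns a new node with the same shape as the input.
sp = ng.softplus(x)
m = ng.mish(x)
h = ng.hswish(x)
sw = ng.swish(x)  # beta defaults to 1.0
~~~~~~~~~~~~~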
""" return _get_node_factory_opset4().create("Acosh", [node]) @nameable_op def asinh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse sinus function on the input node element-wise. + """! Apply hyperbolic inverse sinus function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsinh operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arcsinh operation applied on it. """ return _get_node_factory_opset4().create("Asinh", [node]) @nameable_op def atanh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse tangent function on the input node element-wise. + """! Apply hyperbolic inverse tangent function on the input node element-wise. - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctanh operation applied on it. + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arctanh operation applied on it. """ return _get_node_factory_opset4().create("Atanh", [node]) @@ -226,13 +226,13 @@ def proposal( attrs: dict, name: Optional[str] = None, ) -> Node: - """Filter bounding boxes and outputs only those with the highest prediction confidence. + """! Filter bounding boxes and outputs only those with the highest prediction confidence. - :param class_probs: 4D input floating point tensor with class prediction scores. - :param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes - :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. + @param class_probs: 4D input floating point tensor with class prediction scores. + @param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes + @param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. * base_size The size of the anchor to which scale and ratio attributes are applied. Range of values: a positive unsigned integer number Default value: None @@ -309,7 +309,7 @@ def proposal( 'scale': [2, 3, 3, 4], } Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing Proposal operation. + @return Node representing Proposal operation. """ requirements = [ ("base_size", True, np.unsignedinteger, is_positive_value), @@ -339,13 +339,13 @@ def proposal( def reduce_l1( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """L1-reduction operation on input tensor, eliminating the specified reduction axes. + """! L1-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. + @param node: The tensor we want to mean-reduce. 
+ @param reduction_axes: The axes to eliminate through mean operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing mean-reduction operation. """ return _get_node_factory_opset4().create( "ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -356,13 +356,13 @@ def reduce_l1( def reduce_l2( node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None ) -> Node: - """L2-reduction operation on input tensor, eliminating the specified reduction axes. + """! L2-reduction operation on input tensor, eliminating the specified reduction axes. - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. + @param node: The tensor we want to mean-reduce. + @param reduction_axes: The axes to eliminate through mean operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing mean-reduction operation. """ return _get_node_factory_opset4().create( "ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} @@ -384,22 +384,22 @@ def lstm_cell( clip: float = 0.0, name: Optional[str] = None, ) -> Node: - """Return a node which performs LSTMCell operation. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. - :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. - :param W: The weight tensor with shape: [4*hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. - :param B: The bias tensor for gates with shape: [4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMCell. Node outputs count: 2. + """! Return a node which performs LSTMCell operation. + + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. + @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. + @param W: The weight tensor with shape: [4*hidden_size, input_size]. + @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. + @param B: The bias tensor for gates with shape: [4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. 
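Before moving on, a brief usage sketch for `reduce_l1` and `reduce_l2` as converted above, with an assumed input shape and axes:

~~~~~~~~~~~~~{.py}
import numpy as np
import ngraph as ng

x = ng.parameter([2, 4, 8], name="x", dtype=np.float32)

# Sum of absolute values over axis 2, keeping the reduced dimension: shape [2, 4, 1].
l1 = ng.reduce_l1(x, reduction_axes=[2], keep_dims=True)

# Square root of the sum of squares over axes 1 and 2: shape [2].
l2 = ng.reduce_l2(x, reduction_axes=[1, 2])
~~~~~~~~~~~~~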
+ + @return The new node represents LSTMCell. Node outputs count: 2. """ if activations is None: activations = ["sigmoid", "tanh", "tanh"] diff --git a/ngraph/python/src/ngraph/opset_utils.py b/ngraph/python/src/ngraph/opset_utils.py index f487c72b63e993..49b0d29c4dfd00 100644 --- a/ngraph/python/src/ngraph/opset_utils.py +++ b/ngraph/python/src/ngraph/opset_utils.py @@ -27,7 +27,7 @@ def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory: - """Return NodeFactory configured to create operators from specified opset version.""" + """! Return NodeFactory configured to create operators from specified opset version.""" if opset_version: return NodeFactory(opset_version) else: diff --git a/ngraph/python/src/ngraph/utils/__init__.py b/ngraph/python/src/ngraph/utils/__init__.py index 1f257d1d90c921..65f6dfac3f6ac4 100644 --- a/ngraph/python/src/ngraph/utils/__init__.py +++ b/ngraph/python/src/ngraph/utils/__init__.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** -"""Generic utilities. Factor related functions out to separate files.""" +"""! Generic utilities. Factor related functions out to separate files.""" diff --git a/ngraph/python/src/ngraph/utils/broadcasting.py b/ngraph/python/src/ngraph/utils/broadcasting.py index d1ce6a3414343c..8f52c8c6d1c845 100644 --- a/ngraph/python/src/ngraph/utils/broadcasting.py +++ b/ngraph/python/src/ngraph/utils/broadcasting.py @@ -26,16 +26,16 @@ def get_broadcast_axes( output_shape: TensorShape, input_shape: TensorShape, axis: int = None ) -> AxisSet: - """Generate a list of broadcast axes for ngraph++ broadcast. + """! Generate a list of broadcast axes for ngraph++ broadcast. Informally, a broadcast "adds" axes to the input tensor, replicating elements from the input tensor as needed to fill the new dimensions. Function calculate which of the output axes are added in this way. - :param output_shape: The new shape for the output tensor. - :param input_shape: The shape of input tensor. - :param axis: The axis along which we want to replicate elements. - :return: The indices of added axes. + @param output_shape: The new shape for the output tensor. + @param input_shape: The shape of input tensor. + @param axis: The axis along which we want to replicate elements. + @return The indices of added axes. """ axes_indexes = list(range(0, len(output_shape))) if axis is None: diff --git a/ngraph/python/src/ngraph/utils/decorators.py b/ngraph/python/src/ngraph/utils/decorators.py index 7dc3ad86c69416..cb59961394dd84 100644 --- a/ngraph/python/src/ngraph/utils/decorators.py +++ b/ngraph/python/src/ngraph/utils/decorators.py @@ -27,7 +27,7 @@ def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node: def nameable_op(node_factory_function: Callable) -> Callable: - """Set the name to the ngraph operator returned by the wrapped function.""" + """! Set the name to the ngraph operator returned by the wrapped function.""" @wraps(node_factory_function) def wrapper(*args: Any, **kwargs: Any) -> Node: @@ -39,7 +39,7 @@ def wrapper(*args: Any, **kwargs: Any) -> Node: def unary_op(node_factory_function: Callable) -> Callable: - """Convert the first input value to a Constant Node if a numeric value is detected.""" + """! 
Convert the first input value to a Constant Node if a numeric value is detected.""" @wraps(node_factory_function) def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: @@ -52,7 +52,7 @@ def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: def binary_op(node_factory_function: Callable) -> Callable: - """Convert the first two input values to Constant Nodes if numeric values are detected.""" + """! Convert the first two input values to Constant Nodes if numeric values are detected.""" @wraps(node_factory_function) def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node: diff --git a/ngraph/python/src/ngraph/utils/input_validation.py b/ngraph/python/src/ngraph/utils/input_validation.py index 6b35070dcd179b..5bb34d59fd4dd0 100644 --- a/ngraph/python/src/ngraph/utils/input_validation.py +++ b/ngraph/python/src/ngraph/utils/input_validation.py @@ -14,7 +14,7 @@ # limitations under the License. # ****************************************************************************** -"""Helper functions for validating user input.""" +"""! Helper functions for validating user input.""" import logging from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type @@ -27,7 +27,7 @@ def assert_list_of_ints(value_list: Iterable[int], message: str) -> None: - """Verify that the provided value is an iterable of integers.""" + """! Verify that the provided value is an iterable of integers.""" try: for value in value_list: if not isinstance(value, int): @@ -39,16 +39,16 @@ def assert_list_of_ints(value_list: Iterable[int], message: str) -> None: def _check_value(op_name, attr_key, value, val_type, cond=None): # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool - """Check whether provided value satisfies specified criteria. + """! Check whether provided value satisfies specified criteria. - :param op_name: The operator name which attributes are checked. - :param attr_key: The attribute name. - :param value: The value to check. - :param val_type: Required value type. - :param cond: The optional function running additional checks. + @param op_name: The operator name which attributes are checked. + @param attr_key: The attribute name. + @param value: The value to check. + @param val_type: Required value type. + @param cond: The optional function running additional checks. :raises UserInputError: - :return: True if attribute satisfies all criterias. Otherwise False. + @return True if attribute satisfies all criterias. Otherwise False. """ if not np.issubdtype(type(value), val_type): raise UserInputError( @@ -67,19 +67,19 @@ def _check_value(op_name, attr_key, value, val_type, cond=None): def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False): # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool - """Check whether specified attribute satisfies given criteria. - - :param op_name: The operator name which attributes are checked. - :param attr_dict: Dictionary containing key-value attributes to check. - :param attr_key: Key value for validated attribute. - :param val_type: Value type for validated attribute. - :param cond: Any callable wich accept attribute value and returns True or False. - :param required: Whether provided attribute key is not required. This mean it may be missing + """! Check whether specified attribute satisfies given criteria. + + @param op_name: The operator name which attributes are checked. 
+ @param attr_dict: Dictionary containing key-value attributes to check. + @param attr_key: Key value for validated attribute. + @param val_type: Value type for validated attribute. + @param cond: Any callable wich accept attribute value and returns True or False. + @param required: Whether provided attribute key is not required. This mean it may be missing from provided dictionary. :raises UserInputError: - :return: True if attribute satisfies all criterias. Otherwise False. + @return True if attribute satisfies all criterias. Otherwise False. """ result = True @@ -110,11 +110,11 @@ def check_valid_attributes( requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]] ): # type: (...) -> bool - """Perform attributes validation according to specified type, value criteria. + """! Perform attributes validation according to specified type, value criteria. - :param op_name: The operator name which attributes are checked. - :param attributes: The dictionary with user provided attributes to check. - :param requirements: The list of tuples describing attributes' requirements. The tuple should + @param op_name: The operator name which attributes are checked. + @param attributes: The dictionary with user provided attributes to check. + @param requirements: The list of tuples describing attributes' requirements. The tuple should contain following values: (attr_name: str, is_required: bool, @@ -122,7 +122,7 @@ def check_valid_attributes( value_condition: Callable) :raises UserInputError: - :return: True if all attributes satisfies criterias. Otherwise False. + @return True if all attributes satisfies criterias. Otherwise False. """ for attr, required, val_type, cond in requirements: check_valid_attribute(op_name, attributes, attr, val_type, cond, required) @@ -130,20 +130,20 @@ def check_valid_attributes( def is_positive_value(x): # type: (Any) -> bool - """Determine whether the specified x is positive value. + """! Determine whether the specified x is positive value. - :param x: The value to check. + @param x: The value to check. - :returns: True if the specified x is positive value, False otherwise. + @return True if the specified x is positive value, False otherwise. """ return x > 0 def is_non_negative_value(x): # type: (Any) -> bool - """Determine whether the specified x is non-negative value. + """! Determine whether the specified x is non-negative value. - :param x: The value to check. + @param x: The value to check. - :returns: True if the specified x is non-negative value, False otherwise. + @return True if the specified x is non-negative value, False otherwise. """ return x >= 0 diff --git a/ngraph/python/src/ngraph/utils/node_factory.py b/ngraph/python/src/ngraph/utils/node_factory.py index cff8eb12b9d8ec..39d1d70929d63c 100644 --- a/ngraph/python/src/ngraph/utils/node_factory.py +++ b/ngraph/python/src/ngraph/utils/node_factory.py @@ -9,27 +9,27 @@ class NodeFactory(object): - """Factory front-end to create node objects.""" + """! Factory front-end to create node objects.""" def __init__(self, opset_version: str = DEFAULT_OPSET) -> None: - """Create the NodeFactory object. + """! Create the NodeFactory object. - :param opset_version: The opset version the factory will use to produce ops from. + @param opset_version: The opset version the factory will use to produce ops from. 
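To show how the attribute-validation helpers documented above fit together, a small sketch; the operator name and attribute names are made up for the example:

~~~~~~~~~~~~~{.py}
import numpy as np

from ngraph.utils.input_validation import check_valid_attributes, is_positive_value

# Each requirement is (attr_name, is_required, value_type, value_condition).
requirements = [
    ("num_classes", True, np.integer, is_positive_value),
    ("nms_threshold", False, np.floating, is_positive_value),
]

# Raises UserInputError if a required attribute is missing or a condition fails.
check_valid_attributes("ExampleOp", {"num_classes": 80, "nms_threshold": 0.5}, requirements)
~~~~~~~~~~~~~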
""" self.factory = _NodeFactory(opset_version) def create( self, op_type_name: str, arguments: List[Node], attributes: Optional[Dict[str, Any]] = None ) -> Node: - """Create node object from provided description. + """! Create node object from provided description. The user does not have to provide all node's attributes, but only required ones. - :param op_type_name: The operator type name. - :param arguments: The operator arguments. - :param attributes: The operator attributes. + @param op_type_name: The operator type name. + @param arguments: The operator arguments. + @param attributes: The operator attributes. - :returns: Node object representing requested operator with attributes set. + @return Node object representing requested operator with attributes set. """ if attributes is None: attributes = {} @@ -65,12 +65,12 @@ def create( @staticmethod def _normalize_attr_name(attr_name: str, prefix: str) -> str: - """Normalize attribute name. + """! Normalize attribute name. - :param attr_name: The attribute name. - :param prefix: The prefix to attach to attribute name. + @param attr_name: The attribute name. + @param prefix: The prefix to attach to attribute name. - :returns: The modified attribute name. + @return The modified attribute name. """ # Trim first part of the name if there is only one level of attribute hierarchy. if attr_name.count(".") == 1: @@ -79,32 +79,32 @@ def _normalize_attr_name(attr_name: str, prefix: str) -> str: @classmethod def _normalize_attr_name_getter(cls, attr_name: str) -> str: - """Normalize atr name to be suitable for getter function name. + """! Normalize atr name to be suitable for getter function name. - :param attr_name: The attribute name to normalize + @param attr_name: The attribute name to normalize - :returns: The appropriate getter function name. + @return The appropriate getter function name. """ return cls._normalize_attr_name(attr_name, "get_") @classmethod def _normalize_attr_name_setter(cls, attr_name: str) -> str: - """Normalize attribute name to be suitable for setter function name. + """! Normalize attribute name to be suitable for setter function name. - :param attr_name: The attribute name to normalize + @param attr_name: The attribute name to normalize - :returns: The appropriate setter function name. + @return The appropriate setter function name. """ return cls._normalize_attr_name(attr_name, "set_") @staticmethod def _get_node_attr_value(node: Node, attr_name: str) -> Any: - """Get provided node attribute value. + """! Get provided node attribute value. - :param node: The node we retrieve attribute value from. - :param attr_name: The attribute name. + @param node: The node we retrieve attribute value from. + @param attr_name: The attribute name. - :returns: The node attribute value. + @return The node attribute value. """ if not node._attr_cache_valid: node._attr_cache = node._get_attributes() @@ -113,11 +113,11 @@ def _get_node_attr_value(node: Node, attr_name: str) -> Any: @staticmethod def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None: - """Set the node attribute value. + """! Set the node attribute value. - :param node: The node we change attribute value for. - :param attr_name: The attribute name. - :param value: The new attribute value. + @param node: The node we change attribute value for. + @param attr_name: The attribute name. + @param value: The new attribute value. 
""" node._set_attribute(attr_name, value) node._attr_cache[attr_name] = value diff --git a/ngraph/python/src/ngraph/utils/reduction.py b/ngraph/python/src/ngraph/utils/reduction.py index 731a90b3b5b9c1..97197da063e910 100644 --- a/ngraph/python/src/ngraph/utils/reduction.py +++ b/ngraph/python/src/ngraph/utils/reduction.py @@ -20,13 +20,13 @@ def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]: - """Get reduction axes if it is None and convert it to set if its type is different. + """! Get reduction axes if it is None and convert it to set if its type is different. If reduction_axes is None we default to reduce all axes. - :param node: The node we fill reduction axes for. - :param reduction_axes: The collection of indices of axes to reduce. May be None. - :return: Set filled with indices of axes we want to reduce. + @param node: The node we fill reduction axes for. + @param reduction_axes: The collection of indices of axes to reduce. May be None. + @return Set filled with indices of axes we want to reduce. """ if reduction_axes is None: reduction_axes = set(range(len(node.shape))) diff --git a/ngraph/python/src/ngraph/utils/tensor_iterator_types.py b/ngraph/python/src/ngraph/utils/tensor_iterator_types.py index f4e1e15bdc3cce..51b5a8507571b9 100644 --- a/ngraph/python/src/ngraph/utils/tensor_iterator_types.py +++ b/ngraph/python/src/ngraph/utils/tensor_iterator_types.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** -"""Helper classes for aggregating TensorIterator input/output desciptor attributes.""" +"""! Helper classes for aggregating TensorIterator input/output desciptor attributes.""" from typing import List @@ -22,14 +22,14 @@ class GraphBody(object): - """Class containing graph parameters and results.""" + """! Class containing graph parameters and results.""" def __init__(self, parameters: List[Parameter], results: List[Node],) -> None: self.parameters = parameters self.results = results def serialize(self) -> dict: - """Serialize GraphBody as a dictionary.""" + """! Serialize GraphBody as a dictionary.""" return { "parameters": self.parameters, "results": self.results, @@ -37,14 +37,14 @@ def serialize(self) -> dict: class TensorIteratorInputDesc(object): - """Represents a generic input descriptor for TensorIterator operator.""" + """! Represents a generic input descriptor for TensorIterator operator.""" def __init__(self, input_idx: int, body_parameter_idx: int,) -> None: self.input_idx = input_idx self.body_parameter_idx = body_parameter_idx def serialize(self) -> dict: - """Serialize TensorIteratorInputDesc as a dictionary.""" + """! Serialize TensorIteratorInputDesc as a dictionary.""" return { "input_idx": self.input_idx, "body_parameter_idx": self.body_parameter_idx, @@ -52,7 +52,7 @@ def serialize(self) -> dict: class TensorIteratorSliceInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input formed from slices of TensorIterator input.""" + """! Represents a TensorIterator graph body input formed from slices of TensorIterator input.""" def __init__( self, @@ -72,7 +72,7 @@ def __init__( self.axis = axis def serialize(self) -> dict: - """Serialize TensorIteratorSliceInputDesc as a dictionary.""" + """! 
Serialize TensorIteratorSliceInputDesc as a dictionary.""" output = super().serialize() output["start"] = self.start output["stride"] = self.stride @@ -83,7 +83,7 @@ def serialize(self) -> dict: class TensorIteratorMergedInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input with initial value in the first iteration. + """! Represents a TensorIterator graph body input with initial value in the first iteration. Later on, this input value is computed inside graph body. """ @@ -93,28 +93,28 @@ def __init__(self, input_idx: int, body_parameter_idx: int, body_value_idx: int, self.body_value_idx = body_value_idx def serialize(self) -> dict: - """Serialize TensorIteratorMergedInputDesc as a dictionary.""" + """! Serialize TensorIteratorMergedInputDesc as a dictionary.""" output = super().serialize() output["body_value_idx"] = self.body_value_idx return output class TensorIteratorInvariantInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input that has invariant value during iteration.""" + """! Represents a TensorIterator graph body input that has invariant value during iteration.""" def __init__(self, input_idx: int, body_parameter_idx: int,) -> None: super().__init__(input_idx, body_parameter_idx) class TensorIteratorOutputDesc(object): - """Represents a generic output descriptor for TensorIterator operator.""" + """! Represents a generic output descriptor for TensorIterator operator.""" def __init__(self, body_value_idx: int, output_idx: int,) -> None: self.body_value_idx = body_value_idx self.output_idx = output_idx def serialize(self) -> dict: - """Serialize TensorIteratorOutputDesc as a dictionary.""" + """! Serialize TensorIteratorOutputDesc as a dictionary.""" return { "body_value_idx": self.body_value_idx, "output_idx": self.output_idx, @@ -122,21 +122,21 @@ def serialize(self) -> dict: class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc): - """Represents an output from a specific iteration.""" + """! Represents an output from a specific iteration.""" def __init__(self, body_value_idx: int, output_idx: int, iteration: int,) -> None: super().__init__(body_value_idx, output_idx) self.iteration = iteration def serialize(self) -> dict: - """Serialize TensorIteratorBodyOutputDesc as a dictionary.""" + """! Serialize TensorIteratorBodyOutputDesc as a dictionary.""" output = super().serialize() output["iteration"] = self.iteration return output class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc): - """Represents an output produced by concatenation of output from each iteration.""" + """! Represents an output produced by concatenation of output from each iteration.""" def __init__( self, @@ -156,7 +156,7 @@ def __init__( self.axis = axis def serialize(self) -> dict: - """Serialize TensorIteratorConcatOutputDesc as a dictionary.""" + """! Serialize TensorIteratorConcatOutputDesc as a dictionary.""" output = super().serialize() output["start"] = self.start output["stride"] = self.stride diff --git a/ngraph/python/src/ngraph/utils/types.py b/ngraph/python/src/ngraph/utils/types.py index 185503fa61a29d..265de69f6360d6 100644 --- a/ngraph/python/src/ngraph/utils/types.py +++ b/ngraph/python/src/ngraph/utils/types.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** -"""Functions related to converting between Python and numpy types and ngraph types.""" +"""! 
Functions related to converting between Python and numpy types and ngraph types.""" import logging from typing import List, Union @@ -66,7 +66,7 @@ def get_element_type(data_type: NumericType) -> NgraphType: - """Return an ngraph element type for a Python type or numpy.dtype.""" + """! Return an ngraph element type for a Python type or numpy.dtype.""" if data_type is int: log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") return NgraphType.i32 @@ -85,7 +85,7 @@ def get_element_type(data_type: NumericType) -> NgraphType: def get_element_type_str(data_type: NumericType) -> str: - """Return an ngraph element type string representation for a Python type or numpy dtype.""" + """! Return an ngraph element type string representation for a Python type or numpy dtype.""" if data_type is int: log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") return "i32" @@ -105,7 +105,7 @@ def get_element_type_str(data_type: NumericType) -> str: def get_dtype(ngraph_type: NgraphType) -> np.dtype: - """Return a numpy.dtype for an ngraph element type.""" + """! Return a numpy.dtype for an ngraph element type.""" np_type = next( (np_type for (ng_type, np_type) in ngraph_to_numpy_types_map if ng_type == ngraph_type), None, @@ -118,14 +118,14 @@ def get_dtype(ngraph_type: NgraphType) -> np.dtype: def get_ndarray(data: NumericData) -> np.ndarray: - """Wrap data into a numpy ndarray.""" + """! Wrap data into a numpy ndarray.""" if type(data) == np.ndarray: return data return np.array(data) def get_shape(data: NumericData) -> TensorShape: - """Return a shape of NumericData.""" + """! Return a shape of NumericData.""" if type(data) == np.ndarray: return data.shape # type: ignore elif type(data) == list: @@ -134,7 +134,7 @@ def get_shape(data: NumericData) -> TensorShape: def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constant: - """Return an ngraph Constant node with the specified value.""" + """! Return an ngraph Constant node with the specified value.""" ndarray = get_ndarray(value) if dtype: element_type = get_element_type(dtype) @@ -145,12 +145,12 @@ def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constan def as_node(input_value: NodeInput) -> Node: - """Return input values as nodes. Scalars will be converted to Constant nodes.""" + """! Return input values as nodes. Scalars will be converted to Constant nodes.""" if issubclass(type(input_value), Node): return input_value return make_constant_node(input_value) def as_nodes(*input_values: NodeInput) -> List[Node]: - """Return input values as nodes. Scalars will be converted to Constant nodes.""" + """! Return input values as nodes. 
Scalars will be converted to Constant nodes.""" return [as_node(input_value) for input_value in input_values] From 092e47da6632cca755f6f56a9c53854d0019efc0 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Fri, 25 Sep 2020 20:49:24 +0300 Subject: [PATCH 02/41] layouts and code comments --- docs/doxygen/ie_c_api.xml | 4 ++-- docs/doxygen/ie_docs.xml | 4 ++-- docs/doxygen/ie_py_api.xml | 4 ++-- docs/doxygen/ngraph_cpp_api.xml | 4 ++-- docs/doxygen/ngraph_py_api.xml | 13 ++++--------- ngraph/python/src/ngraph/opset1/ops.py | 2 +- ngraph/python/src/ngraph/opset4/ops.py | 3 ++- 7 files changed, 15 insertions(+), 19 deletions(-) diff --git a/docs/doxygen/ie_c_api.xml b/docs/doxygen/ie_c_api.xml index 1e650bfb0db6f8..d61d18ed6226a6 100644 --- a/docs/doxygen/ie_c_api.xml +++ b/docs/doxygen/ie_c_api.xml @@ -30,11 +30,11 @@ - + - + diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index aa746fea5e0738..7a033dbc83ef82 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -812,11 +812,11 @@ - + - + diff --git a/docs/doxygen/ie_py_api.xml b/docs/doxygen/ie_py_api.xml index 24655dfc0b9155..3659453341f5fe 100644 --- a/docs/doxygen/ie_py_api.xml +++ b/docs/doxygen/ie_py_api.xml @@ -28,11 +28,11 @@ - + - + diff --git a/docs/doxygen/ngraph_cpp_api.xml b/docs/doxygen/ngraph_cpp_api.xml index 018e5ea6de86cc..ff0621b5bed70b 100644 --- a/docs/doxygen/ngraph_cpp_api.xml +++ b/docs/doxygen/ngraph_cpp_api.xml @@ -18,7 +18,7 @@ - + @@ -30,7 +30,7 @@ - + diff --git a/docs/doxygen/ngraph_py_api.xml b/docs/doxygen/ngraph_py_api.xml index 39da4397dd6b6d..9a482af7fe17cc 100644 --- a/docs/doxygen/ngraph_py_api.xml +++ b/docs/doxygen/ngraph_py_api.xml @@ -18,17 +18,12 @@ - + - - - - - - - - + + + diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/ngraph/python/src/ngraph/opset1/ops.py index fa47680c951b7e..9ee2ab51ee9ab8 100644 --- a/ngraph/python/src/ngraph/opset1/ops.py +++ b/ngraph/python/src/ngraph/opset1/ops.py @@ -2784,7 +2784,7 @@ def tensor_iterator( concat_output_desc: List[TensorIteratorConcatOutputDesc], name: Optional[str] = None, ) -> Node: - """ + """! Perform recurrent execution of the network described in the body, iterating through the data. @param inputs: The provided to TensorIterator operator. diff --git a/ngraph/python/src/ngraph/opset4/ops.py b/ngraph/python/src/ngraph/opset4/ops.py index 5e64f1ea547cdd..6e3fc1b7e2fa62 100644 --- a/ngraph/python/src/ngraph/opset4/ops.py +++ b/ngraph/python/src/ngraph/opset4/ops.py @@ -296,7 +296,7 @@ def proposal( Default value: "" (empty string) Required: no Example of attribute dictionary: - .. code-block:: python + ~~~~~~~~~~~~~~~~~~~~~~~~{.py} # just required ones attrs = { 'base_size': 85, @@ -308,6 +308,7 @@ def proposal( 'ratio': [0.1, 1.5, 2.0, 2.5], 'scale': [2, 3, 3, 4], } + ~~~~~~~~~~~~~~~~~~~~~~~~ Optional attributes which are absent from dictionary will be set with corresponding default. @return Node representing Proposal operation. 
""" From c06342389d51c36bdc8f3e460950f74cfee06729 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Thu, 1 Oct 2020 16:41:47 +0300 Subject: [PATCH 03/41] separate layout --- docs/doxygen/ie_docs.xml | 1223 +++++++------------------------- docs/doxygen/openvino_docs.xml | 283 ++++++++ 2 files changed, 559 insertions(+), 947 deletions(-) create mode 100644 docs/doxygen/openvino_docs.xml diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index f4d2183ad6b9d0..7d7d5f4835a129 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -2,802 +2,281 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + - - - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + - - - - - + + + + + - - - + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - + + + + + + + + + + + + - - - + - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + - - - - - - - - + + + - - - - - - - - + + + + + + + @@ -812,176 +291,26 @@ + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml new file mode 100644 index 00000000000000..1ffe0005cf1d7a --- /dev/null +++ b/docs/doxygen/openvino_docs.xml @@ -0,0 +1,283 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From af0ecd99d780c1b5f33ba5a92e8e7bfbb7db3f8b Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 2 Oct 2020 03:19:53 +0300 Subject: [PATCH 04/41] Changed layouts --- docs/doxygen/ie_docs.xml | 62 ++++++++++------------------------ docs/doxygen/openvino_docs.xml | 30 +++++++++++----- 2 files changed, 39 insertions(+), 53 deletions(-) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index b3ccdc45c5e876..1e45e2bff56af4 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -256,7 +256,7 @@ - + @@ -275,55 +275,29 @@ - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 1ffe0005cf1d7a..3e930e6443609d 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -5,7 +5,6 @@ - @@ -27,15 +26,22 @@ - - - + + + + - - - + + + + + + + + + @@ -52,14 +58,19 @@ - + + + + + - + + @@ -98,6 +109,7 @@ + From bb19e1c16bbe84b1b37606991c3fe0cdc09a41b3 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 2 Oct 2020 13:07:42 +0300 Subject: [PATCH 05/41] Removed FPGA from the documentation --- build-instruction.md | 3 - ...Deep_Learning_Inference_Engine_DevGuide.md | 3 +- docs/IE_DG/Glossary.md | 2 +- docs/IE_DG/Intro_to_Performance.md | 2 +- docs/IE_DG/inference_engine_intro.md | 6 +- docs/IE_DG/supported_plugins/FPGA.md | 292 +----------- .../supported_plugins/Supported_Devices.md | 297 ++++++------ .../IE_Dev_Procedure.md | 1 - docs/get_started/get_started_linux.md | 2 +- docs/get_started/get_started_macos.md | 2 +- docs/get_started/get_started_windows.md | 2 +- docs/how_tos/how-to-links.md | 7 - docs/index.md | 3 +- docs/install_guides/PAC_Configure.md | 248 +--------- .../VisionAcceleratorFPGA_Configure.md | 333 +------------- ...VisionAcceleratorFPGA_Configure_Windows.md | 122 +---- .../installing-openvino-docker-linux.md | 39 +- .../installing-openvino-linux-fpga.md | 332 +------------- .../installing-openvino-linux.md | 1 - .../installing-openvino-windows-fpga.md | 434 +----------------- .../installing-openvino-windows.md | 2 - 21 files changed, 206 insertions(+), 1927 deletions(-) diff --git a/build-instruction.md b/build-instruction.md index 32e919d63ea8b8..0237d94423cfa4 100644 --- a/build-instruction.md +++ b/build-instruction.md @@ -46,9 +46,6 @@ The open source version of Inference Engine includes the following plugins: | MYRIAD plugin | Intel® Movidius™ Neural Compute Stick powered by the Intel® Movidius™ Myriad™ 2, Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X | | Heterogeneous plugin | Heterogeneous plugin enables computing for inference on one network on several Intel® devices. | -Inference Engine plugin for Intel® FPGA is distributed only in a binary form, -as a part of [Intel® Distribution of OpenVINO™]. 
- ## Build on Linux\* Systems The software was validated on: diff --git a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md index 08e41be17bfa18..a6617d3718b46e 100644 --- a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md +++ b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md @@ -21,7 +21,7 @@ The OpenVINO™ toolkit includes the following components: preparing them for optimal execution with the Deep Learning Inference Engine. The Model Optimizer supports converting Caffe*, TensorFlow*, MXNet*, Kaldi*, ONNX* models. - [Deep Learning Inference Engine](inference_engine_intro.md) — A unified API to allow high performance inference on many hardware types - including Intel® CPU, Intel® Processor Graphics, Intel® FPGA, Intel® Neural Compute Stick 2. + including Intel® CPU, Intel® Processor Graphics, Intel® Neural Compute Stick 2. - [nGraph](nGraph_Flow.md) — graph representation and manipulation engine which is used to represent a model inside Inference Engine and allows the run-time model construction without using Model Optimizer. * [OpenCV](https://docs.opencv.org/) — OpenCV* community version compiled for Intel® hardware. Includes PVL libraries for computer vision. @@ -78,7 +78,6 @@ inference of a pre-trained and optimized deep learning model and a set of sample * [Supported Devices](supported_plugins/Supported_Devices.md) * [GPU](supported_plugins/CL_DNN.md) * [CPU](supported_plugins/CPU.md) - * [FPGA](supported_plugins/FPGA.md) * [VPU](supported_plugins/VPU.md) * [MYRIAD](supported_plugins/MYRIAD.md) * [HDDL](supported_plugins/HDDL.md) diff --git a/docs/IE_DG/Glossary.md b/docs/IE_DG/Glossary.md index 780a3d5fcab7d8..047d4484a6682b 100644 --- a/docs/IE_DG/Glossary.md +++ b/docs/IE_DG/Glossary.md @@ -64,7 +64,7 @@ Glossary of terms used in the Inference Engine | :--- | :--- | | Batch | Number of images to analyze during one call of infer. Maximum batch size is a property of the network and it is set before loading of the network to the plugin. In NHWC, NCHW and NCDHW image data layout representation, the N refers to the number of images in the batch | | Blob | Memory container used for storing inputs, outputs of the network, weights and biases of the layers | -| Device (Affinitity) | A preferred Intel(R) hardware device to run the inference (CPU, GPU, FPGA, etc.) | +| Device (Affinitity) | A preferred Intel(R) hardware device to run the inference (CPU, GPU, etc.) | | Extensibility mechanism, Custom layers | The mechanism that provides you with capabilities to extend the Inference Engine and Model Optimizer so that they can work with topologies containing layers that are not yet supported | | ICNNNetwork | An Interface of the Convolutional Neural Network that Inference Engine reads from IR. Consists of topology, weights and biases | | IExecutableNetwork | An instance of the loaded network which allows the Inference Engine to request (several) infer requests and perform inference synchronously or asynchronously | diff --git a/docs/IE_DG/Intro_to_Performance.md b/docs/IE_DG/Intro_to_Performance.md index 2987a3628bab17..12913c5811c7c9 100644 --- a/docs/IE_DG/Intro_to_Performance.md +++ b/docs/IE_DG/Intro_to_Performance.md @@ -27,7 +27,7 @@ latency penalty. So, for more real-time oriented usages, lower batch sizes (as l Refer to the [Benchmark App](../../inference-engine/samples/benchmark_app/README.md) sample, which allows latency vs. throughput measuring. 
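For a quick look at where the batch size enters the picture in code, here is a minimal C++ sketch (the IR path and the batch value of 8 are placeholders, not a recommendation); the batch is fixed on the network before it is loaded to a device, and a single `Infer()` call then runs on the whole batch:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;

    // "model.xml" is a placeholder for an IR produced by the Model Optimizer.
    auto network = core.ReadNetwork("model.xml");

    // Batch size is a property of the network and is set before loading it to the device.
    network.setBatchSize(8);

    auto execNetwork = core.LoadNetwork(network, "CPU");
    auto request = execNetwork.CreateInferRequest();
    request.Infer();  // a single call now processes a batch of 8 inputs
    return 0;
}
```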
## Using Async API -To gain better performance on accelerators, such as VPU or FPGA, the Inference Engine uses the asynchronous approach (see +To gain better performance on accelerators, such as VPU, the Inference Engine uses the asynchronous approach (see [Integrating Inference Engine in Your Application (current API)](Integrate_with_customer_application_new_API.md)). The point is amortizing the costs of data transfers, by pipe-lining, see [Async API explained](@ref omz_demos_object_detection_demo_ssd_async_README). Since the pipe-lining relies on the availability of the parallel slack, running multiple inference requests in parallel is essential. diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index cb3b43fcab72dc..c69166a34118a9 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -3,7 +3,7 @@ Introduction to Inference Engine {#openvino_docs_IE_DG_inference_engine_intro} After you have used the Model Optimizer to create an Intermediate Representation (IR), use the Inference Engine to infer the result for a given input data. -Inference Engine is a set of C++ libraries providing a common API to deliver inference solutions on the platform of your choice: CPU, GPU, VPU, or FPGA. Use the Inference Engine API to read the Intermediate Representation, set the input and output formats, and execute the model on devices. While the C++ libraries is the primary implementation, C libraries and Python bindings are also available. +Inference Engine is a set of C++ libraries providing a common API to deliver inference solutions on the platform of your choice: CPU, GPU, or VPU. Use the Inference Engine API to read the Intermediate Representation, set the input and output formats, and execute the model on devices. While the C++ libraries is the primary implementation, C libraries and Python bindings are also available. For Intel® Distribution of OpenVINO™ toolkit, Inference Engine binaries are delivered within release packages. @@ -13,7 +13,7 @@ To learn about how to use the Inference Engine API for your application, see the For complete API Reference, see the [API Reference](usergroup29.html) section. -Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, FPGA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs. +Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs. 
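The following minimal C++ sketch illustrates that unified API: the same IR (the `model.xml` path is a placeholder) is loaded on different plugins simply by changing the device string passed to `LoadNetwork`, including a heterogeneous fallback configuration:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder IR path

    // The same API is used for every target; only the device string changes,
    // and the corresponding plugin is loaded behind the scenes.
    auto onCpu  = core.LoadNetwork(network, "CPU");
    auto onGpu  = core.LoadNetwork(network, "GPU");             // requires GPU drivers/plugin
    auto hetero = core.LoadNetwork(network, "HETERO:GPU,CPU");  // automatic fallback between devices
    return 0;
}
```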
Modules in the Inference Engine component --------------------------------------- @@ -53,7 +53,6 @@ For each supported target device, Inference Engine provides a plugin — a DLL/s | ------------- | ------------- | |CPU| Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE | |GPU| Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics -|FPGA| Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA, Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Speed Grade 2) | |MYRIAD| Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X| |GNA| Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor |HETERO|Automatic splitting of a network inference between several devices (for example if a device doesn't support certain layers| @@ -65,7 +64,6 @@ The table below shows the plugin libraries and additional dependencies for Linux |--------|------------------------|-------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------| | CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | | GPU | `libclDNNPlugin.so` | `libinference_engine_lp_transformations.so`, `libOpenCL.so` | `clDNNPlugin.dll` | `OpenCL.dll`, `inference_engine_lp_transformations.dll` | -| FPGA | `libdliaPlugin.so` | `libdla_compiler_core.so`, `libdla_runtime_core.so`, `libcrypto.so`, `libalteracl.so`, `liblpsolve5525.so`, `libprotobuf.so`, `libacl_emulator_kernel_rt.so` | `dliaPlugin.dll` | `dla_compiler_core.dll`, `dla_runtime_core.dll`, `crypto.dll`, `alteracl.dll`, `lpsolve5525.dll`, `protobuf.dll`, `acl_emulator_kernel_rt.dll` | MYRIAD | `libmyriadPlugin.so` | `libusb.so`, `libinference_engine_lp_transformations.so` | `myriadPlugin.dll` | `usb.dll`, `inference_engine_lp_transformations.dll` | | HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so`, `libinference_engine_lp_transformations.so`| `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll`, `inference_engine_lp_transformations.dll` | | GNA | `libGNAPlugin.so` | `libgna.so`, `libinference_engine_lp_transformations.so` | `GNAPlugin.dll` | `gna.dll`, `inference_engine_lp_transformations.dll` | diff --git a/docs/IE_DG/supported_plugins/FPGA.md b/docs/IE_DG/supported_plugins/FPGA.md index ee76253db04d70..63ae6e62ed7be0 100644 --- a/docs/IE_DG/supported_plugins/FPGA.md +++ b/docs/IE_DG/supported_plugins/FPGA.md @@ -19,294 +19,4 @@ Intel will be transitioning to the next-generation programmable deep-learning so Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and 
the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -## Introducing FPGA Plugin - -The FPGA plugin provides an opportunity for high performance scoring of neural networks on Intel® FPGA devices. - -> **NOTE**: Before using the FPGA plugin, ensure that you have installed and configured either the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) or the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For installation and configuration details, see [FPGA installation](Supported_Devices.md). - -## Heterogeneous Execution - -When your topology contains layers that are not supported by the Intel® FPGA plugin, use [Heterogeneous plugin](HETERO.md) with dedicated fallback device. - -If a network has layers that are not supported in the Intel® FPGA plugin or in a fallback plugin, you can implement a custom layer on the CPU/GPU and use the [Extensibility mechanism](../Extensibility_DG/Intro.md). -In addition to adding custom kernels, you must still point to the CPU plugin or the GPU plugin as fallback devices for heterogeneous plugin. - -## Supported Networks - -The following network topologies are supported in heterogeneous mode, running on FPGA with fallback to CPU or GPU devices. - -> **IMPORTANT**: Use only bitstreams from the current version of the OpenVINO toolkit. Bitstreams from older versions of the OpenVINO toolkit are incompatible with later versions of the OpenVINO toolkit. For example, you cannot use the `1-0-1_A10DK_FP16_Generic` bitstream, when the OpenVINO toolkit supports the `2019R2_PL2_FP16_InceptionV1_SqueezeNet_VGG_YoloV3.aocx` bitstream. 
- - -| Network | Bitstreams (Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2)) | Bitstreams (Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA) | -|:-------------------------------------|:-------------------------------------------------------------------|:---------------------------------------------------------------------------------------------| -| AlexNet | 2020-4_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic, 2020-4_PL2_FP11_AlexNet_GoogleNet_Generic | 2020-4_RC_FP16_AlexNet_GoogleNet_Generic, 2020-4_RC_FP11_AlexNet_GoogleNet_Generic | -| GoogleNet v1 | 2020-4_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic, 2020-4_PL2_FP11_AlexNet_GoogleNet_Generic | 2020-4_RC_FP16_AlexNet_GoogleNet_Generic, 2020-4_RC_FP11_AlexNet_GoogleNet_Generic | -| VGG-16 | 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_InceptionV1_SqueezeNet_TinyYolo_VGG, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| VGG-19 | 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_InceptionV1_SqueezeNet_TinyYolo_VGG, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| SqueezeNet v 1.0 | 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG, 2020-4_PL2_FP11_SqueezeNet | 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3, 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3 | -| SqueezeNet v 1.1 | 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG, 2020-4_PL2_FP11_SqueezeNet | 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3, 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3 | -| ResNet-18 | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| ResNet-50 | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| ResNet-101 | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| ResNet-152 | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| MobileNet (Caffe) | 2020-4_PL2_FP16_MobileNet_Clamp, 2020-4_PL2_FP11_MobileNet_Clamp | 2020-4_RC_FP16_MobileNet_Clamp, 2020-4_RC_FP11_MobileNet_Clamp | -| MobileNet (TensorFlow) | 2020-4_PL2_FP16_MobileNet_Clamp, 2020-4_PL2_FP11_MobileNet_Clamp | 2020-4_RC_FP16_MobileNet_Clamp, 2020-4_RC_FP11_MobileNet_Clamp| -| SqueezeNet-based variant of the SSD* | 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG, 2020-4_PL2_FP11_SqueezeNet | 2020-4_RC_FP16_InceptionV1_SqueezeNet_TinyYolo_VGG, 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3 | -| ResNet-based variant of SSD | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_ResNet_TinyYolo_VGG | -| RMNet | 2020-4_PL2_FP16_RMNet, 2020-4_PL2_FP11_RMNet | 2020-4_RC_FP16_RMNet, 2020-4_RC_FP11_RMNet | -| Yolo v3 | 2020-4_PL2_FP16_ResNet_YoloV3, 2020-4_PL2_FP11_YoloV3_ELU | 2020-4_RC_FP16_ResNet_YoloV3, 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3 | - - -In addition to the list above, arbitrary topologies having big continues subgraphs consisting of layers supported by FPGA plugin are recommended to be executed on FPGA plugin. - -## Bitstreams that are Optimal to Use with the Intel's Pre-Trained Models - -The table below provides you with a list of Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) bitstreams that are optimal to use for the Intel's pre-trained models. - -
- Click to expand/collapse the table - -| Model Name | FP11 Bitstreams | FP16 Bitstreams | -| :--- | :--- | :--- | -| action-recognition-0001-decoder | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| action-recognition-0001-encoder | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| age-gender-recognition-retail-0013 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| asl-recognition-0004 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| driver-action-recognition-adas-0002-decoder | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| driver-action-recognition-adas-0002-encoder | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| emotions-recognition-retail-0003 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| face-detection-0100 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| face-detection-0102 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| face-detection-0104 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| face-detection-0105 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| face-detection-0106 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| face-detection-adas-0001 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| face-detection-adas-binary-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| face-detection-retail-0004 | 2020-3_PL2_FP11_TinyYolo_SSD300.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| face-detection-retail-0005 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| face-reidentification-retail-0095 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| facial-landmarks-35-adas-0002 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| faster-rcnn-resnet101-coco-sparse-60-0001 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| gaze-estimation-adas-0002 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| handwritten-japanese-recognition-0001 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| handwritten-score-recognition-0003 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| head-pose-estimation-adas-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| human-pose-estimation-0001 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| icnet-camvid-ava-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| icnet-camvid-ava-sparse-30-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| icnet-camvid-ava-sparse-60-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| image-retrieval-0001 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| instance-segmentation-security-0010 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 
2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| instance-segmentation-security-0050 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| instance-segmentation-security-0083 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| instance-segmentation-security-1025 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| landmarks-regression-retail-0009 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| license-plate-recognition-barrier-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| pedestrian-and-vehicle-detector-adas-0001 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| pedestrian-detection-adas-0002 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| pedestrian-detection-adas-binary-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| person-attributes-recognition-crossroad-0230 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-action-recognition-0005 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-action-recognition-0006 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-action-recognition-teacher-0002 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-asl-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| person-detection-raisinghand-recognition-0001 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-retail-0002 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-detection-retail-0013 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-reidentification-retail-0031 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_ELU.aocx | -| person-reidentification-retail-0248 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-reidentification-retail-0249 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| person-reidentification-retail-0300 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| person-vehicle-bike-detection-crossroad-0078 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_ELU.aocx | -| person-vehicle-bike-detection-crossroad-1016 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| product-detection-0001 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| resnet18-xnor-binary-onnx-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_RMNet.aocx | -| resnet50-binary-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| road-segmentation-adas-0001 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| semantic-segmentation-adas-0001 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| single-image-super-resolution-1032 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_RMNet.aocx | -| single-image-super-resolution-1033 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 
2020-3_PL2_FP16_RMNet.aocx | -| text-detection-0003 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| text-detection-0004 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| text-image-super-resolution-0001 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_RMNet.aocx | -| text-recognition-0012 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| text-spotting-0002-detector | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| text-spotting-0002-recognizer-decoder | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| text-spotting-0002-recognizer-encoder | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| unet-camvid-onnx-0001 | 2020-3_PL2_FP11_InceptionV1_ResNet_VGG.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| vehicle-attributes-recognition-barrier-0039 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| vehicle-detection-adas-0002 | 2020-3_PL2_FP11_YoloV3_ELU.aocx | 2020-3_PL2_FP16_SwishExcitation.aocx | -| vehicle-detection-adas-binary-0001 | 2020-3_PL2_FP11_AlexNet_GoogleNet_Generic.aocx | 2020-3_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic.aocx | -| vehicle-license-plate-detection-barrier-0106 | 2020-3_PL2_FP11_MobileNet_Clamp.aocx | 2020-3_PL2_FP16_MobileNet_Clamp.aocx | -| yolo-v2-ava-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| yolo-v2-ava-sparse-35-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| yolo-v2-ava-sparse-70-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx | -| yolo-v2-tiny-ava-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| yolo-v2-tiny-ava-sparse-30-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | -| yolo-v2-tiny-ava-sparse-60-0001 | 2020-3_PL2_FP11_SqueezeNet.aocx | 2020-3_PL2_FP16_ResNet_YoloV3.aocx | - -
- -## Translate from Architecture to FPGA Bitstream Files - -Various FPGA bitstreams that support CNN are available in the OpenVINO™ toolkit package for FPGA. - -To select the correct bitstream (`.aocx`) file for an architecture, select a network (for example, Resnet-18) from the table above for either the Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Speed Grade 1), Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Speed Grade 2) or the Intel® Programmable Acceleration Card (PAC) with Intel® Arria® 10 GX FPGA and note the corresponding architecture. - -The following table describes several parameters that might help you to select the proper bitstream for your needs: - -| Name | Board | Precision | LRN Support | Leaky ReLU Support | PReLU Support | Clamp Support | ELU Support | -|:------------------------------------------|:--------------------------------------------------------------------------------|:----------|:------------|:-------------------|:--------------|:--------------|:------------| -| 2020-4_PL2_FP11_AlexNet_GoogleNet_Generic | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | true | true | true | false | false | -| 2020-4_PL2_FP11_SqueezeNet | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | true | true | false | false | -| 2020-4_PL2_FP11_MobileNet_Clamp | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | true | true | true | false | -| 2020-4_PL2_FP11_InceptionV1_ResNet_VGG | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | false | false | false | false | -| 2020-4_PL2_FP11_RMNet | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | true | true | false | true | -| 2020-4_PL2_FP11_TinyYolo_SSD300 | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | true | true | true | false | false | -| 2020-4_PL2_FP11_YoloV3_ELU | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | true | true | false | true | -| 2020-4_PL2_FP11_Streaming_InternalUseOnly | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | false | false | false | false | -| 2020-4_PL2_FP11_Streaming_Slicing_InternalUseOnly | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | false | false | false | false | -| 2020-4_PL2_FP11_SwishExcitation | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP11 | false | false | false | false | false | -| 2020-4_PL2_FP16_AlexNet_GoogleNet_SSD300_Generic | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | true | true | true | false | false | -| 2020-4_PL2_FP16_ELU | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | true | true | false | true | -| 2020-4_PL2_FP16_MobileNet_Clamp | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | true | true | true | false | -| 2020-4_PL2_FP16_ResNet_YoloV3 | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | true | true | false | false | -| 2020-4_PL2_FP16_RMNet | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | true | true | false | true | -| 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG | Intel® 
Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | true | true | false | false | -| 2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG | Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Speed Grade 2) | FP16 | false | false | false | false | false | -| 2020-4_RC_FP11_AlexNet_GoogleNet_Generic | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | true | true | true | false | false | -| 2020-4_RC_FP11_RMNet | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | false | true | true | false | true | -| 2020-4_RC_FP11_Streaming_InternalUseOnly | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | true | false | false | false | false | -| 2020-4_RC_FP11_Streaming_Slicing_InternalUseOnly | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | true | false | false | false | false | -| 2020-4_RC_FP11_ELU | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | false | true | true | false | true | -| 2020-4_RC_FP11_SwishExcitation | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | false | false | false | false | false | -| 2020-4_RC_FP11_InceptionV1_ResNet_SqueezeNet_TinyYolo_YoloV3 | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | false | true | true | false | false | -| 2020-4_RC_FP11_MobileNet_Clamp | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP11 | false | true | true | true | false | -| 2020-4_RC_FP16_AlexNet_GoogleNet_Generic | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | true | true | true | false | false | -| 2020-4_RC_FP16_InceptionV1_SqueezeNet_TinyYolo_VGG | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | true | true | false | false | -| 2020-4_RC_FP16_RMNet | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | true | true | false | true | -| 2020-4_RC_FP16_SwishExcitation | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | false | false | false | false | -| 2020-4_RC_FP16_MobileNet_Clamp | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | true | true | true | false | -| 2020-4_RC_FP16_ResNet_YoloV3 | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | true | true | false | false | -| 2020-4_RC_FP16_InceptionV1_SqueezeNet_YoloV3 | Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | FP16 | false | true | true | false | false | - -## Set Environment for Running the FPGA Plugin - -To make the FPGA plugin run directly or through the heterogeneous plugin, set up the environment: -1. Set up environment to access Intel® FPGA RTE for OpenCL: -``` -source /opt/altera/aocl-pro-rte/aclrte-linux64/init_opencl.sh -``` -2. Set the following environment variable and program the board with a DLA bitstream. Programming of the board is not supported during runtime and must be done before running an application. 
- - | Variable | Setting | - | :----------------------------------| :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| - | ACL_PCIE_USE_JTAG_PROGRAMMING | Set this variable to a value of 1 to force FPGA reprogramming using JTAG | - -## Analyzing Heterogeneous Execution - -Besides generation of .dot files, you can use the error listening mechanism: - -```cpp -class FPGA_ErrorListener : public InferenceEngine::IErrorListener -{ -public: - virtual void onError(const char *msg) noexcept override { - std::cout << msg; - } -}; -... -FPGA_ErrorListener err_listener; -core.SetLogCallback(err_listener); // will be used for FPGA device as well -``` -If during network loading some layers are decided to be executed on a fallback plugin, the following message is printed: - -```cpp -Layer (Name: detection_out, Type: DetectionOutput) is not supported: - custom or unknown. - Has (3) sets of inputs, must be 1, or 2. - Input dimensions (2) should be 4. -``` - -## Multiple FPGA Devices Support - -The Inference Engine FPGA plugin provides an ability to load different networks on multiple FPGA devices. For example, to load two networks AlexNet and MobileNet v2 on two different FPGA devices, follow the steps below: - -1. Program each FGPA device with a corresponding bitstream: -```bash -aocl program acl0 2019R3_PV_PL1_FP16_AlexNet_GoogleNet_InceptionV1_SSD300_Generic.aocx -``` -```bash -aocl program acl1 2019R3_PV_PL1_FP16_MobileNet_Clamp.aocx -``` -For more information about bitstream programming instructions, refer to [Installation Guide for Linux* with Support for FPGA](Supported_Devices.md) -2. All FPGA devices are enumerated with unique ID starting from `0`. By default, all networks are loaded to the default -device with ID `0`. If you want to load a network on a particular non-default device, specify the `KEY_DEVICE_ID` -parameter for C++ and `DEVICE_ID` parameter for Python\*. 
-The following code snippets demonstrates how to load the AlexNet network on the FPGA device with ID `0` and the -MobileNet v2 network on the device with ID `1`: - * With C++: -```cpp -InferenceEngine::Core core; - -// Load AlexNet network on the first FPGA device programmed with bitstream supporting AlexNet -auto alexnetNetwork = core.ReadNetwork("alexnet.xml"); -auto exeNetwork1 = core.LoadNetwork(alexnetNetwork, "FPGA.0"); - -// Load MobileNet network on the second FPGA device programmed with MobileNet bitstream -auto mobilenetNetwork = core.ReadNetwork("mobilenet_v2.xml"); -auto exeNetwork2 = core.LoadNetwork(mobilenetNetwork, "FPGA", { { KEY_DEVICE_ID, "1" } }); -``` - * With Python: -```python -# Load AlexNet network on the first FPGA device programmed with bitstream supporting AlexNet -net1 = IENetwork(model="alexnet.xml", weights="alexnet.bin") -plugin.load(network=net1, config={"DEVICE_ID": "0"}) - -# Load MobileNet network on the second FPGA device programmed with MobileNet bitstream -net2 = IENetwork(model="mobilenet_v2.xml", weights="mobilenet_v2.bin") -plugin.load(network=net2, config={"DEVICE_ID": "1"}) -``` -Note that you have to use asynchronous infer requests to utilize several FPGA devices, otherwise the execution on devices is performed sequentially. - -## Import and Export Network Flow - -Since the 2019 R4 release, FPGA and HETERO plugins support the export and import flow, which allows to export a compiled network from a plugin to a binary blob by running the command below: - -```bash -$ ./compile_tool -m resnet.xml -DLA_ARCH_NAME 4x2x16x32_fp16_sb9408_fcd1024_actk4_poolk4_normk1_owk2_image300x300x8192_mbfr -d HETERO:FPGA,CPU -Inference Engine: - API version ............ 2.1 - Build .................. 6db44e09a795cb277a63275ea1395bfcb88e46ac - Description ....... API -Done -``` - -Once the command is executed, the binary blob named `resnet.blob` is created at the working directory. Refer to the [Compile tool](../../../inference-engine/tools/compile_tool/README.md) documentation for more details. - -A compiled binary blob can be later imported via `InferenceEngine::Core::Import`: - -```cpp -InferenceEngine::Core core; -std::ifstream strm("resnet.blob"); -auto execNetwork = core.Import(strm); -``` - -## How to Interpret Performance Counters - -As a result of collecting performance counters using InferenceEngine::InferRequest::GetPerformanceCounts you can find out performance data about execution on FPGA, pre-processing and post-processing data and data transferring from/to FPGA card. - -If network is sliced to two parts that are executed on CPU, you can find performance data about Intel® MKL-DNN kernels, their types, and other useful information. - -## Limitations of the FPGA Support for CNN - -The Inference Engine FPGA plugin has limitations on network topologies, kernel parameters, and batch size. - -* Depending on the bitstream loaded on the target device, the FPGA performs calculations with precision rates ranging from FP11 to FP16. This might have accuracy implications. Use the [Accuracy Checker](@ref omz_tools_accuracy_checker_README) to verify the network accuracy on the validation data set. -* Networks that have many CNN layers that are not supported on FPGA stayed in topologies between supported layers might lead to dividing of graph to many subgraphs that might lead to `CL_OUT_OF_HOST_MEMORY` error. These topologies are not FPGA friendly for this release. 
-* When you use the heterogeneous plugin, the affinity and distribution of nodes by devices depends on the FPGA bitstream that you use. Some layers might not be supported by a bitstream or parameters of the layer are not supported by the bitstream. - -## See Also -* [Supported Devices](Supported_Devices.md) +For documentation for the FPGA plugin available in previous releases of Intel® Distribution of OpenVINO™ toolkit with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_IE_DG_supported_plugins_FPGA.html) and lower. \ No newline at end of file diff --git a/docs/IE_DG/supported_plugins/Supported_Devices.md b/docs/IE_DG/supported_plugins/Supported_Devices.md index 7e4111837a14bb..744972c3dd7645 100644 --- a/docs/IE_DG/supported_plugins/Supported_Devices.md +++ b/docs/IE_DG/supported_plugins/Supported_Devices.md @@ -11,7 +11,6 @@ The Inference Engine provides unique capabilities to infer deep learning models |------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| |[GPU plugin](CL_DNN.md) |Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics | |[CPU plugin](CPU.md) |Intel® Xeon® with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® Streaming SIMD Extensions (Intel® SSE) | -|[FPGA plugin](FPGA.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Speed Grade 2), Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA | |[VPU plugins](VPU.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X, Intel® Vision Accelerator Design with Intel® Movidius™ VPUs | |[GNA plugin](GNA.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor| |[Multi-Device plugin](MULTI.md) |Multi-Device plugin enables simultaneous inference of the same network on several Intel® devices in parallel | @@ -53,7 +52,6 @@ For example, the CHW value at index (c,h,w) is physically located at index (c\*H |:-------------|:----------------------:|:----------------------:|:----------------------:| |CPU plugin |Supported and preferred |Supported |Supported | |GPU plugin |Supported |Supported and preferred |Supported\* | -|FPGA plugin |Supported |Supported |Not supported | |VPU plugins |Not supported |Supported |Not supported | |GNA plugin |Supported |Supported |Not supported |
\* - currently, only a limited set of topologies might benefit from enabling I8 models on GPU
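
The precision tables above are a static reference; when it is unclear whether a particular device build actually supports a precision such as FP16 or INT8, the device can also be asked at runtime. Below is a minimal sketch, assuming the Inference Engine 2.x C++ `Core` API and a device name such as `"GPU"` (adjust the device string for your system); it only illustrates the query, and the support matrices here remain the reference for what each plugin is validated against.

```cpp
#include <inference_engine.hpp>

#include <iostream>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core core;

    // OPTIMIZATION_CAPABILITIES reports strings such as "FP32", "FP16", "INT8", "BIN".
    std::vector<std::string> caps =
        core.GetMetric("GPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES))
            .as<std::vector<std::string>>();

    for (const std::string& capability : caps) {
        std::cout << capability << std::endl;
    }
    return 0;
}
```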
@@ -66,7 +64,6 @@ the supported models formats depends on the actual underlying devices. _Generall |:-------------|:--------:|:-------------:|:-------------:|:-------------:|:------------:|:-------------:| |CPU plugin |Supported |Not supported |Supported |Supported |Not supported |Supported | |GPU plugin |Supported |Supported\* |Supported\* |Supported\* |Not supported |Supported\* | -|FPGA plugin |Supported |Supported\* |Supported |Supported |Not supported |Supported | |VPU plugins |Supported |Supported |Supported |Not supported |Not supported |Not supported | |GNA plugin |Supported |Not supported |Supported |Not supported |Supported |Supported | @@ -80,7 +77,6 @@ the supported input precision depends on the actual underlying devices. _Genera |:-------------|:--------:|:------------:| |CPU plugin |Supported |Not supported | |GPU plugin |Supported |Supported | -|FPGA plugin |Supported |Supported | |VPU plugins |Supported |Supported | |GNA plugin |Supported |Not supported | For [Multi-Device](MULTI.md) and [Heterogeneous](HETERO.md) execution @@ -92,7 +88,6 @@ the supported output precision depends on the actual underlying devices. _Gener |:-------------|:------------:|:------------:|:------------:|:------------:| |CPU plugin |Supported |Supported |Supported |Supported | |GPU plugin |Supported |Supported |Supported |Supported | -|FPGA plugin |Not supported |Supported |Supported |Not supported | |VPU plugins |Not supported |Supported |Supported |Supported | |GNA plugin |Not supported |Not supported |Not supported |Supported | @@ -109,152 +104,152 @@ For setting relevant configuration, refer to the ### Supported Layers The following layers are supported by the plugins and by [Shape Inference feature](../ShapeInference.md): -| Layers | GPU | CPU | VPU | GNA | FPGA | ShapeInfer | -|:-------------------------------|:-------------:|:-------------:|:-------------:|:-------------:|:---------------:|:-------------:| -| Abs | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Acos | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Acosh | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Activation-Clamp | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Activation-ELU | Supported |Supported\*\*\*| Supported | Not Supported | Supported | Supported | -| Activation-Exp | Supported |Supported\*\*\*| Not Supported | Supported | Not Supported | Supported | -| Activation-Leaky ReLU | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Activation-Not | Supported |Supported\*\*\*| Not Supported | Not Supported | Not Supported | Supported | -| Activation-PReLU | Supported |Supported\*\*\*| Supported | Not Supported | Supported | Supported | -| Activation-ReLU | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Activation-ReLU6 | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Activation-Sigmoid/Logistic | Supported |Supported\*\*\*| Supported | Supported | Not Supported | Supported | -| Activation-TanH | Supported |Supported\*\*\*| Supported | Supported | Not Supported | Supported | -| ArgMax | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Asin | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Asinh | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | 
-| Atan | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Atanh | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| BatchNormalization | Supported | Supported | Supported | Not Supported | Supported\* | Supported | -| BinaryConvolution | Supported | Supported | Not Supported | Not Supported | Not Supported | Supported | -| Broadcast | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Ceil | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Concat | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Const | Supported | Supported | Supported | Supported | Not Supported | Not Supported | -| Convolution-Dilated | Supported | Supported | Supported | Not Supported | Supported | Supported | -| Convolution-Dilated 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| Convolution-Grouped | Supported | Supported | Supported | Not Supported | Supported | Supported | -| Convolution-Grouped 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| Convolution-Ordinary | Supported | Supported | Supported | Supported\* | Supported | Supported | -| Convolution-Ordinary 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| Cos | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Cosh | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Crop | Supported | Supported | Supported | Supported | Not Supported | Supported | -| CTCGreedyDecoder | Supported\*\* | Supported\*\* | Supported\* | Not Supported | Not Supported | Supported | -| Deconvolution | Supported | Supported | Supported | Not Supported | Supported\* | Supported | -| Deconvolution 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| DeformableConvolution | Supported | Supported | Not Supported | Not Supported | Not Supported | Supported | -| DepthToSpace | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| DetectionOutput | Supported | Supported\*\* | Supported\* | Not Supported | Not Supported | Supported | -| Eltwise-And | Supported |Supported\*\*\*| Not Supported | Not Supported | Not Supported | Supported | -| Eltwise-Add | Supported |Supported\*\*\*| Not Supported | Not Supported | Supported | Supported | -| Eltwise-Div | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Equal | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-FloorMod | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Greater | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-GreaterEqual | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Less | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-LessEqual | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-LogicalAnd | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-LogicalOr | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-LogicalXor | Supported 
|Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Max | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Min | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Mul | Supported |Supported\*\*\*| Supported | Supported | Not Supported | Supported | -| Eltwise-NotEqual | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Pow | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Prod | Supported |Supported\*\*\*| Supported | Supported | Not Supported | Supported | -| Eltwise-SquaredDiff | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Eltwise-Sub | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Eltwise-Sum | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Erf | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Exp | Supported | Supported | Not Supported | Supported | Not Supported | Supported | -| FakeQuantize | Not Supported | Supported | Not Supported | Not Supported | Not Supported | Supported | -| Fill | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Flatten | Supported | Supported | Supported | Not Supported | Not Supported | Supported | -| Floor | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| FullyConnected (Inner Product) | Supported |Supported\*\*\*| Supported | Supported | Supported | Supported | -| Gather | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| GatherTree | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Gemm | Supported | Supported | Supported | Not Supported | Not Supported | Supported | -| GRN | Supported\*\* | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| HardSigmoid | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Interp | Supported\*\* | Supported\*\* | Supported | Not Supported | Not Supported | Supported\* | -| Log | Supported | Supported\*\* | Supported | Supported | Not Supported | Supported | -| LRN (Norm) | Supported | Supported | Supported | Not Supported | Supported | Supported | -| LSTMCell | Supported | Supported | Supported | Supported | Not Supported | Not Supported | -| GRUCell | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| RNNCell | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| LSTMSequence | Supported | Supported | Supported | Not Supported | Not Supported | Not Supported | -| GRUSequence | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| RNNSequence | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| LogSoftmax | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Not Supported | -| Memory | Not Supported | Supported | Not Supported | Supported | Not Supported | Supported | -| MVN | Supported | Supported\*\* | Supported\* | Not Supported | Not Supported | Supported | -| Neg | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| NonMaxSuppression | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported 
| Supported | -| Normalize | Supported | Supported\*\* | Supported\* | Not Supported | Not Supported | Supported | -| OneHot | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Pad | Supported | Supported\*\* | Supported\* | Not Supported | Not Supported | Supported | -| Permute | Supported | Supported | Supported | Supported\* | Not Supported | Supported | -| Pooling(AVG,MAX) | Supported | Supported | Supported | Supported | Supported | Supported | -| Pooling(AVG,MAX) 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| Power | Supported | Supported\*\* | Supported | Supported\* | Supported\* | Supported | -| PowerFile | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Not Supported | -| PriorBox | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| PriorBoxClustered | Supported\*\* | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Proposal | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| PSROIPooling | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Range | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Reciprocal | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceAnd | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceL1 | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceL2 | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceLogSum | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceLogSumExp | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceMax | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceMean | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceMin | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceOr | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceProd | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceSum | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ReduceSumSquare | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| RegionYolo | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| ReorgYolo | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Resample | Supported | Supported\*\* | Supported | Not Supported | Supported\* | Supported | -| Reshape | Supported |Supported\*\*\*| Supported | Supported | Not Supported | Supported\* | -| ReverseSequence | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| RNN | Not Supported | Supported | Supported | Not Supported | Not Supported | Not Supported | -| ROIPooling | Supported\* | Supported | Supported | Not Supported | Not Supported | Supported | -| ScaleShift | Supported |Supported\*\*\*| Supported\* | Supported | Supported | Supported | -| ScatterUpdate | Not Supported | Supported\*\* | Not Supported | Not Supported | Not 
Supported | Supported | -| Select | Supported | Supported | Supported | Not Supported | Not Supported | Supported | -| Selu | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| ShuffleChannels | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Sign | Supported | Supported\*\* | Supported | Not Supported | Not Supported | Supported | -| Sin | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Sinh | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| SimplerNMS | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Slice | Supported |Supported\*\*\*| Supported | Supported | Supported\* | Supported | -| SoftMax | Supported |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| Softplus | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Softsign | Supported | Supported\*\* | Not Supported | Supported | Not Supported | Supported | -| SpaceToDepth | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| SpatialTransformer | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Split | Supported |Supported\*\*\*| Supported | Supported | Supported\* | Supported | -| Squeeze | Supported | Supported\*\* | Supported | Supported | Not Supported | Supported | -| StridedSlice | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Tan | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| TensorIterator | Not Supported | Supported | Supported | Supported | Not Supported | Not Supported | -| Tile | Supported\*\* |Supported\*\*\*| Supported | Not Supported | Not Supported | Supported | -| TopK | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | Supported | -| Unpooling | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | -| Unsqueeze | Supported | Supported\*\* | Supported | Supported | Not Supported | Supported | -| Upsampling | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | +| Layers | GPU | CPU | VPU | GNA | ShapeInfer | +|:-------------------------------|:-------------:|:-------------:|:-------------:|:-------------:|:-------------:| +| Abs | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Acos | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Acosh | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Activation-Clamp | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Activation-ELU | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Activation-Exp | Supported |Supported\*\*\*| Not Supported | Supported | Supported | +| Activation-Leaky ReLU | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Activation-Not | Supported |Supported\*\*\*| Not Supported | Not Supported | Supported | +| Activation-PReLU | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Activation-ReLU | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Activation-ReLU6 | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Activation-Sigmoid/Logistic | Supported |Supported\*\*\*| Supported | Supported | Supported | +| 
Activation-TanH | Supported |Supported\*\*\*| Supported | Supported | Supported | +| ArgMax | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Asin | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Asinh | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Atan | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Atanh | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| BatchNormalization | Supported | Supported | Supported | Not Supported | Supported | +| BinaryConvolution | Supported | Supported | Not Supported | Not Supported | Supported | +| Broadcast | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Ceil | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Concat | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Const | Supported | Supported | Supported | Supported | Not Supported | +| Convolution-Dilated | Supported | Supported | Supported | Not Supported | Supported | +| Convolution-Dilated 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | +| Convolution-Grouped | Supported | Supported | Supported | Not Supported | Supported | +| Convolution-Grouped 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | +| Convolution-Ordinary | Supported | Supported | Supported | Supported\* | Supported | +| Convolution-Ordinary 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | +| Cos | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Cosh | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Crop | Supported | Supported | Supported | Supported | Supported | +| CTCGreedyDecoder | Supported\*\* | Supported\*\* | Supported\* | Not Supported | Supported | +| Deconvolution | Supported | Supported | Supported | Not Supported | Supported | +| Deconvolution 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | +| DeformableConvolution | Supported | Supported | Not Supported | Not Supported | Supported | +| DepthToSpace | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| DetectionOutput | Supported | Supported\*\* | Supported\* | Not Supported | Supported | +| Eltwise-And | Supported |Supported\*\*\*| Not Supported | Not Supported | Supported | +| Eltwise-Add | Supported |Supported\*\*\*| Not Supported | Not Supported | Supported | +| Eltwise-Div | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Equal | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-FloorMod | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Greater | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-GreaterEqual | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Less | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-LessEqual | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-LogicalAnd | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-LogicalOr | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-LogicalXor | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Max | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Min | Supported |Supported\*\*\*| Supported | 
Not Supported | Supported | +| Eltwise-Mul | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Eltwise-NotEqual | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Pow | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Prod | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Eltwise-SquaredDiff | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Eltwise-Sub | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Eltwise-Sum | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Erf | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Exp | Supported | Supported | Not Supported | Supported | Supported | +| FakeQuantize | Not Supported | Supported | Not Supported | Not Supported | Supported | +| Fill | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Flatten | Supported | Supported | Supported | Not Supported | Supported | +| Floor | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| FullyConnected (Inner Product) | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Gather | Supported | Supported\*\* | Supported | Not Supported | Supported | +| GatherTree | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Gemm | Supported | Supported | Supported | Not Supported | Supported | +| GRN | Supported\*\* | Supported\*\* | Supported | Not Supported | Supported | +| HardSigmoid | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Interp | Supported\*\* | Supported\*\* | Supported | Not Supported | Supported\* | +| Log | Supported | Supported\*\* | Supported | Supported | Supported | +| LRN (Norm) | Supported | Supported | Supported | Not Supported | Supported | +| LSTMCell | Supported | Supported | Supported | Supported | Not Supported | +| GRUCell | Supported | Supported | Not Supported | Not Supported | Not Supported | +| RNNCell | Supported | Supported | Not Supported | Not Supported | Not Supported | +| LSTMSequence | Supported | Supported | Supported | Not Supported | Not Supported | +| GRUSequence | Supported | Supported | Not Supported | Not Supported | Not Supported | +| RNNSequence | Supported | Supported | Not Supported | Not Supported | Not Supported | +| LogSoftmax | Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | +| Memory | Not Supported | Supported | Not Supported | Supported | Supported | +| MVN | Supported | Supported\*\* | Supported\* | Not Supported | Supported | +| Neg | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| NonMaxSuppression | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Normalize | Supported | Supported\*\* | Supported\* | Not Supported | Supported | +| OneHot | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Pad | Supported | Supported\*\* | Supported\* | Not Supported | Supported | +| Permute | Supported | Supported | Supported | Supported\* | Supported | +| Pooling(AVG,MAX) | Supported | Supported | Supported | Supported | Supported | +| Pooling(AVG,MAX) 3D | Supported | Supported | Not Supported | Not Supported | Not Supported | +| Power | Supported | Supported\*\* | Supported | Supported\* | Supported | +| PowerFile | Not Supported | Supported\*\* | Not Supported | Not Supported | Not Supported | +| PriorBox | Supported | Supported\*\* | Supported | Not Supported | 
Supported | +| PriorBoxClustered | Supported\*\* | Supported\*\* | Supported | Not Supported | Supported | +| Proposal | Supported | Supported\*\* | Supported | Not Supported | Supported | +| PSROIPooling | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Range | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Reciprocal | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceAnd | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceL1 | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceL2 | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceLogSum | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceLogSumExp | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceMax | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceMean | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceMin | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceOr | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceProd | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceSum | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ReduceSumSquare | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| RegionYolo | Supported | Supported\*\* | Supported | Not Supported | Supported | +| ReorgYolo | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Resample | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Reshape | Supported |Supported\*\*\*| Supported | Supported | Supported\* | +| ReverseSequence | Supported | Supported\*\* | Supported | Not Supported | Supported | +| RNN | Not Supported | Supported | Supported | Not Supported | Not Supported | +| ROIPooling | Supported\* | Supported | Supported | Not Supported | Supported | +| ScaleShift | Supported |Supported\*\*\*| Supported\* | Supported | Supported | +| ScatterUpdate | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Select | Supported | Supported | Supported | Not Supported | Supported | +| Selu | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| ShuffleChannels | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Sign | Supported | Supported\*\* | Supported | Not Supported | Supported | +| Sin | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Sinh | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| SimplerNMS | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Slice | Supported |Supported\*\*\*| Supported | Supported | Supported | +| SoftMax | Supported |Supported\*\*\*| Supported | Not Supported | Supported | +| Softplus | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Softsign | Supported | Supported\*\* | Not Supported | Supported | Supported | +| SpaceToDepth | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| SpatialTransformer | Not Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Split | Supported |Supported\*\*\*| Supported | Supported | Supported | +| Squeeze | Supported | Supported\*\* | Supported | Supported | Supported | +| StridedSlice | Supported | 
Supported\*\* | Not Supported | Not Supported | Supported | +| Tan | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| TensorIterator | Not Supported | Supported | Supported | Supported | Not Supported | +| Tile | Supported\*\* |Supported\*\*\*| Supported | Not Supported | Supported | +| TopK | Supported | Supported\*\* | Not Supported | Not Supported | Supported | +| Unpooling | Supported | Not Supported | Not Supported | Not Supported | Not Supported | +| Unsqueeze | Supported | Supported\*\* | Supported | Supported | Supported | +| Upsampling | Supported | Not Supported | Not Supported | Not Supported | Not Supported | \*- support is limited to the specific parameters. Refer to "Known Layers Limitation" section for the device [from the list of supported](Supported_Devices.md). diff --git a/docs/Inference_Engine_Development_Procedure/IE_Dev_Procedure.md b/docs/Inference_Engine_Development_Procedure/IE_Dev_Procedure.md index 2be7f8dcb476be..f9638ee4cd9131 100644 --- a/docs/Inference_Engine_Development_Procedure/IE_Dev_Procedure.md +++ b/docs/Inference_Engine_Development_Procedure/IE_Dev_Procedure.md @@ -30,7 +30,6 @@ * [IE TESTS] * [IE DOCS] * [IE MKLDNN] - * [IE FPGA] * [IE GNA] * [IE CLDNN] * [IE MYRIAD] diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 63ed0f3c16e5f3..1c3549e5f77e0e 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -197,7 +197,7 @@ Inputs you'll need to specify: - **A compiled OpenVINO™ code sample or demo application** that runs inferencing against a model that has been run through the Model Optimizer, resuiting in an IR, using the other inputs you provide. - **One or more models** in the Intermediate Representation format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example vehicle + make/model + license plate recognition. - **One or more media files**. The media is typically a video file, but can be a still photo. -- **One or more target device** on which you run inference. The target device can be the CPU, GPU, FPGA, or VPU accelerator. +- **One or more target device** on which you run inference. The target device can be the CPU, GPU, or VPU accelerator. ### Build the Code Samples and Demo Applications diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index b8b88c2bbc604e..0c1eafc23d3ed6 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -192,7 +192,7 @@ Inputs you need to specify when using a code sample or demo application: - **A compiled OpenVINO™ code sample or demo application** that runs inferencing against a model that has been run through the Model Optimizer, resulting in an IR, using the other inputs you provide. - **One or more models** in the IR format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example, vehicle + make/model + license plate recognition. - **One or more media files**. The media is typically a video file, but can be a still photo. 
-- **One or more target device** on which you run inference. The target device can be the CPU, FPGA, or VPU accelerator. +- **One or more target device** on which you run inference. The target device can be the CPU, or VPU accelerator. ### Build the Code Samples and Demo Applications diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index f32be343ff6f10..b22d46c60e1747 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -193,7 +193,7 @@ Inputs you need to specify when using a code sample or demo application: - **A compiled OpenVINO™ code sample or demo application** that runs inferencing against a model that has been run through the Model Optimizer, resulting in an IR, using the other inputs you provide. - **One or more models** in the IR format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example, vehicle + make/model + license plate recognition. - **One or more media files**. The media is typically a video file, but can be a still photo. -- **One or more target device** on which you run inference. The target device can be the CPU, GPU, FPGA, or VPU accelerator. +- **One or more target device** on which you run inference. The target device can be the CPU, GPU, or VPU accelerator. ### Build the Code Samples and Demo Applications diff --git a/docs/how_tos/how-to-links.md b/docs/how_tos/how-to-links.md index 89c4f210c0d38e..cfbc2d65029696 100644 --- a/docs/how_tos/how-to-links.md +++ b/docs/how_tos/how-to-links.md @@ -33,13 +33,6 @@ To learn about what is *custom layers* and how to work with them in the Deep Lea -## Deploying Intel® FPGAs for Deep Learning Inferencing with OpenVINO™ Toolkit - -[![](https://img.youtube.com/vi/7yh1c8kJn1A/0.jpg)](https://www.youtube.com/watch?v=7yh1c8kJn1A) - - - - ## Computer Vision at the Edge with OpenVINO by Krishnakumar Shetti at ODSC_India [![](https://img.youtube.com/vi/RfRCrq35LXg/0.jpg)](https://www.youtube.com/watch?v=RfRCrq35LXg) diff --git a/docs/index.md b/docs/index.md index 7981d47dcff58e..9039d7fd98b891 100644 --- a/docs/index.md +++ b/docs/index.md @@ -7,7 +7,7 @@ OpenVINO™ toolkit quickly deploys applications and solutions that emulate huma OpenVINO™ toolkit: - Enables CNN-based deep learning inference on the edge -- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® FPGA, Intel® Neural Compute Stick 2 and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs +- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2 and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs - Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels - Includes optimized calls for computer vision standards, including OpenCV\* and OpenCL™ @@ -47,7 +47,6 @@ OpenVINO™ toolkit includes the following components: OpenVINO™ toolkit documentation set includes the following documents: - [Install the Intel® Distribution of OpenVINO™ Toolkit for Linux*](install_guides/installing-openvino-linux.md) -- [Install the Intel® Distribution of OpenVINO™ Toolkit for Linux with FPGA Support](install_guides/installing-openvino-linux-fpga.md) - [Install the Intel® Distribution of OpenVINO™ Toolkit for 
Windows*](install_guides/installing-openvino-windows.md) - [Install the Intel® Distribution of OpenVINO™ Toolkit for macOS*](install_guides/installing-openvino-macos.md) - [Install the Intel® Distribution of OpenVINO™ Toolkit for Raspbian*](install_guides/installing-openvino-raspbian.md) diff --git a/docs/install_guides/PAC_Configure.md b/docs/install_guides/PAC_Configure.md index c4ccf081ac7774..0654eed7b966bb 100644 --- a/docs/install_guides/PAC_Configure.md +++ b/docs/install_guides/PAC_Configure.md @@ -1,239 +1,21 @@ # Configuration Guide for Intel® Distribution of OpenVINO™ toolkit 2020.4 and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA on CentOS or Ubuntu* {#openvino_docs_install_guides_PAC_Configure} -> **NOTE**: For previous versions, see [Configuration Guide for OpenVINO 2020.3](https://docs.openvinotoolkit.org/2020.3/_docs_install_guides_PAC_Configure.html), [Configuration Guide for OpenVINO 2020.2](https://docs.openvinotoolkit.org/2020.2/_docs_install_guides_PAC_Configure.html), [Configuration Guide for OpenVINO 2019R1/2019R2/2019R3](https://docs.openvinotoolkit.org/2019_R3.1/_docs_install_guides_PAC_Configure_2019RX.html), [Configuration Guide for OpenVINO 2018R5](https://docs.openvinotoolkit.org/2019_R1/_docs_install_guides_PAC_Configure_2018R5.html). +## Product Change Notice +Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA -## Get Started + + + + + + + + + +
Change Notice Begins: July 2020
Change Date: October 2020
-The following describes the set-up of the Intel® Distribution of OpenVINO™ toolkit on CentOS* 7.4 or Ubuntu* 16.04, kernel 4.15. This is based upon a completely fresh install of the OS with developer tools included. Official Intel® documentation for the install process can be found in the following locations and it is highly recommended that these are read, especially for new users. This document serves as a guide, and in some cases, adds additional detail where necessary. +Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. -[Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug-qs-ias-v1-2-1.pdf) +Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -[OpenCL™ on Intel® PAC Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-opencl-a10.pdf) - -[Installing the Intel® Distribution of OpenVINO™ toolkit for Linux*](installing-openvino-linux.md) - -(Optional): Install NTFS support for transferring large installers if already downloaded on another machine. -```sh -sudo yum -y install epel-release -``` -```sh -sudo yum -y install ntfs-3g -``` - -## Install Intel® PAC and the Intel® Programmable Acceleration Card Stack - -1. Download version 1.2.1 of the Acceleration Stack for Runtime from the [Intel FPGA Acceleration Hub](https://www.altera.com/solutions/acceleration-hub/downloads.html). -This downloads as `a10_gx_pac_ias_1_2_1_pv_rte.tar.gz`. Let it download to `~/Downloads`. - -2. Create a new directory to install to: -```sh -mkdir -p ~/tools/intelrtestack -``` - -3. Untar and launch the installer: -```sh -cd ~/Downloads -``` -```sh -tar xf a10_gx_pac_ias_1_2_1_pv_rte.tar.gz -``` -```sh -cd a10_gx_pac_ias_1_2_1_pv_rte_installer -``` -```sh -./setup.sh -``` - -4. Select **Y** to install OPAE and accept license and when asked, specify `/home//tools/intelrtestack` as the absolute install path. During the installation there should be a message stating the directory already exists as it was created in the first command above. Select **Y** to install to this directory. If this message is not seen, it suggests that there was a typo when entering the install location. - -5. Tools are installed to the following directories: - * OpenCL™ Run-time Environment: `~/tools/intelrtestack/opencl_rte/aclrte-linux64` - * Intel® Acceleration Stack for FPGAs: `~/tools/intelrtestack/a10_gx_pac_ias_1_2_1_pv` - -7. Check the version of the FPGA Interface Manager firmware on the PAC board. -```sh -sudo fpgainfo fme -``` - -8. 
If the reported FIM (`Pr Interface Id`) is not `38d782e3-b612-5343-b934-2433e348ac4c` then follow the instructions in Appendix A: Updating the FIM and BMC Firmware of the [Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/ug/ug-qs-ias-v1-2-1.pdf) to update the FIM and BMC. - -9. Run the built in self-test to verify operation of the Acceleration Stack and Intel® PAC in a non-virtualized environment. -```sh -sudo sh -c "echo 20 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" -``` -```sh -source ~/tools/intelrtestack/init_env.sh -``` -```sh -sudo fpgabist $OPAE_PLATFORM_ROOT/hw/samples/nlb_mode_3/bin/nlb_mode_3.gbs -``` - -## Verify the Intel® Acceleration Stack for FPGAs OpenCL™ BSP - -1. Remove any previous FCD files that may be from previous installations of hardware in the `/opt/Intel/OpenCL/Boards/` directory: -```sh -cd /opt/Intel/OpenCL/Boards -sudo rm -rf *.fcd -``` - -2. Install `lsb_release` on your system if you are using CentOS: -```sh -sudo yum install redhat-lsb-core -``` - -3. Create an initialization script `~/init_openvino.sh` with the following content that can be run upon opening a new terminal or rebooting. This will source the script ran above as well as setting up the OpenCL™ environment. -```sh -source $HOME/tools/intelrtestack/init_env.sh -``` -```sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -``` -```sh -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -``` -```sh -export INTELFPGAOCLSDKROOT="/opt/altera/aocl-pro-rte/aclrte-linux64" -``` -```sh -export ALTERAOCLSDKROOT="$INTELFPGAOCLSDKROOT" -``` -```sh -export AOCL_BOARD_PACKAGE_ROOT="$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -``` -```sh -$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh -``` -```sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -``` - -4. Source the script: -```sh -source ~/init_openvino.sh -``` - -5. Some of the settings made in the child scripts need a reboot to take effect. Reboot the machine and source the script again. Note that this script should be sourced each time a new terminal is opened for use with the Intel® Acceleration Stack for FPGAs and Intel® Distribution of OpenVINO™ toolkit. -```sh -source ~/init_openvino.sh -``` - -6. Install the OpenCL™ driver: -```sh -cd ~ -``` -```sh -sudo -E ./tools/intelrtestack/opencl_rte/aclrte-linux64/bin/aocl install -``` -Select **Y** when asked to install the BSP. Note that the following warning can be safely ignored. -```sh -WARNING: install not implemented. Please refer to DCP Quick Start User Guide. -``` - -7. Program the Intel® PAC board with a pre-compiled `.aocx` file (OpenCL™ based FPGA bitstream). -```sh -cd $OPAE_PLATFORM_ROOT/opencl -``` -```sh -aocl program acl0 hello_world.aocx -``` - -8. Build and run the Hello World application: -```sh -sudo tar xf exm_opencl_hello_world_x64_linux.tgz -``` -```sh -sudo chmod -R a+w hello_world -``` -```sh -cd hello_world -``` -```sh -make -``` -```sh -cp ../hello_world.aocx ./bin -``` -```sh -./bin/host -``` - -## Add Intel® Distribution of OpenVINO™ toolkit with FPGA Support to Environment Variables - -1. To run the Intel® Distribution of OpenVINO™ toolkit, add the last four commands to the `~/init_openvino.sh` script. The previous content is shown as well. 
-```sh -source $HOME/tools/intelrtestack/init_env.sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -export INTELFPGAOCLSDKROOT="/opt/altera/aocl-pro-rte/aclrte-linux64" -export ALTERAOCLSDKROOT="$INTELFPGAOCLSDKROOT" -export AOCL_BOARD_PACKAGE_ROOT="$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -export IE_INSTALL="/opt/intel/openvino/deployment_tools" -source $IE_INSTALL/../bin/setupvars.sh -export PATH="$PATH:$HOME/inference_engine_samples_build/intel64/Release" -alias mo="python3.6 $IE_INSTALL/model_optimizer/mo.py" -``` -For Ubuntu systems, it is recommended to use python3.5 above instead of python3.6. - -2. Source the script -```sh -source ~/init_openvino.sh -``` - -## Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions. For example, you cannot use the `2020-3_RC_FP16_AlexNet_GoogleNet_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2020-4_RC_FP16_AlexNet_GoogleNet_Generic bitstream`. - -There are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package. -For the Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX, the pre-trained bitstreams are in the `/opt/intel/openvino/bitstreams/a10_dcp_bitstreams` directory. This example uses a SqueezeNet bitstream with low precision for the classification sample. - -Program the bitstream for Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX. -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/2020-4_RC_FP11_InceptionV1_ResNet_SqueezeNet_TinyYolo_YoloV3.aocx -``` - -## Use the Intel® Distribution of OpenVINO™ toolkit - -1. Run inference with the Intel® Distribution of OpenVINO™ toolkit independent of the demo scripts using the SqueezeNet model that was download by the scripts. For convenience, copy the necessary files to a local directory. If the workstation has been rebooted or a new terminal is opened, source the script above first. -```sh -mkdir ~/openvino_test -``` -```sh -cd ~/openvino_test -``` -```sh -cp ~/openvino_models/models/public/squeezenet1.1/squeezenet1.1.* . -``` -```sh -cp ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.labels . -``` - -2. Note that the `squeezenet1.1.labels` file contains the classes used by ImageNet and is included here so that the inference results show text rather than classification numbers. Convert the model with the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). Note that the command below uses the alias defined in the script above and is not referred to in other documentation. -```sh -mo --input_model squeezenet1.1.caffemodel -``` - -3. Now run Inference on the CPU using one of the built in Inference Engine samples: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -``` - -4. 
Add the `-d` option to run on FPGA: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -d HETERO:FPGA,CPU -``` - -Congratulations, You are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. To learn more about how the Intel® Distribution of OpenVINO™ toolkit works, the Hello World tutorial and are other resources are provided below. - -## Hello World Face Detection Tutorial - -Use the [Intel® Distribution of OpenVINO™ toolkit with FPGA Hello World Face Detection Exercise](https://github.com/fritzboyle/openvino-with-fpga-hello-world-face-detection) to learn more about how the software and hardware work together. - -## Additional Resources - -* Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit). -* Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org). -* [Inference Engine FPGA plugin documentation](../IE_DG/supported_plugins/FPGA.md). +For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_PAC_Configure.html) and lower. \ No newline at end of file diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure.md b/docs/install_guides/VisionAcceleratorFPGA_Configure.md index 3e0979d7e45e2b..cdb47001383931 100644 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure.md +++ b/docs/install_guides/VisionAcceleratorFPGA_Configure.md @@ -1,324 +1,21 @@ # Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2020.4 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2 (IEI's Mustang-F100-A10) on Linux* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure} -> **NOTE**: Intel® Arria® 10 FPGA (Mustang-F100-A10) Speed Grade 1 is not available since the OpenVINO 2020.3 release. If you use Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Mustang-F100-A10) Speed Grade 1, we recommend continuing to use the [Intel® Distribution of OpenVINO™ toolkit 2020.1](https://docs.openvinotoolkit.org/2020.1/_docs_install_guides_VisionAcceleratorFPGA_Configure.html) release. -For previous versions, see [Configuration Guide for OpenVINO 2019R3](https://docs.openvinotoolkit.org/2019_R3.1/_docs_install_guides_VisionAcceleratorFPGA_Configure_2019R3.html), [Configuration Guide for OpenVINO 2019R1](https://docs.openvinotoolkit.org/2019_R3.1/_docs_install_guides_VisionAcceleratorFPGA_Configure_2019R1.html), [Configuration Guide for OpenVINO 2018R5](https://docs.openvinotoolkit.org/2019_R3.1/_docs_install_guides_VisionAcceleratorFPGA_Configure_2018R5.html). +## Product Change Notice +Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA -## 1. Configure and Set Up the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA + + + + + + + + + +
Change Notice Begins: July 2020
Change Date: October 2020
-1. Download [Intel® Quartus® Prime Programmer and Tools Standard Edition 18.1](http://fpgasoftware.intel.com/18.1/?edition=standard&platform=linux&download_manager=direct#tabs-4). Install the Intel® Quartus® Prime Programmer and Tools Software to the `/home//intelFPGA/18.1` directory. +Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. -2. Download the [fpga_install.sh](https://docs.openvinotoolkit.org/downloads/2020/2/fpga_install.sh) script to the `/home/` directory. +Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. - a. Switch to superuser: -```sh -sudo su -``` - b. Use the `fpga_install.sh` script from `/home/` to install your FPGA card (default is SG2). -```sh -source /home//fpga_install.sh -``` - c. To know more about the fpga_install options, invoke the script with `-h` command. -```sh -source /home//fpga_install.sh -h -``` - d. Follow the `fpga_install.sh` script prompts to finish installing your FPGA card. - - e. After reboot launch the script again with same options as in step 2.b. - - f. The `fpga_install.sh` script creates an initialization script `/home//init_openvino.sh` that should be used to setup proper environment variables. - - g. To test if FPGA card was installed succesfully run `aocl diagnose`: -```sh -aocl diagnose -``` -You should see `DIAGNOSTIC_PASSED` before proceeding to the next steps. - - h. If you prefer to install the FPGA card manually, follow the steps 3-17 in this section and [Steps to Flash the FPGA Card](#steps-to-flash-the-fpga-card), otherwise you can skip to "Program a Bitstream". - -3. Check if /etc/udev/rules.d/51-usbblaster.rules file exists and content matches with 3.b, if it does skip to next step. - - a. Switch to superuser: -```sh -sudo su -``` - - b. Create a file named /etc/udev/rules.d/51-usbblaster.rules and add the following lines to it (Red Hat Enterprise 5 and above): -```sh -# Intel FPGA Download Cable -SUBSYSTEM=="usb", ATTR{idVendor}=="09fb", ATTR{idProduct}=="6001", MODE="0666" -SUBSYSTEM=="usb", ATTR{idVendor}=="09fb", ATTR{idProduct}=="6002", MODE="0666" -SUBSYSTEM=="usb", ATTR{idVendor}=="09fb", ATTR{idProduct}=="6003", MODE="0666" - -# Intel FPGA Download Cable II -SUBSYSTEM=="usb", ATTR{idVendor}=="09fb", ATTR{idProduct}=="6010", MODE="0666" -SUBSYSTEM=="usb", ATTR{idVendor}=="09fb", ATTR{idProduct}=="6810", MODE="0666" -``` -> **CAUTION**: Do not add extra line breaks to the .rules file. - - c. Reload udev rules without reboot: -```sh -udevadm control --reload-rules -udevadm trigger -``` - - d. You can exit superuser if you wish. - - -4. 
Unpack the BSP for your Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2: -> **NOTE**: If you installed OpenVINO™ as root you will need to switch to superuser -```sh -cd /opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/ -sudo su -tar -xvzf a10_1150_sg2_r4.1.tgz -chmod -R 755 /opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams -``` -> **NOTE**: If you do not know which version of the board you have, please refer to the product label on the fan cover side or by the product SKU: Mustang-F100-A10E-R10 => SG2 - -5. Create an initialization script `/home//init_openvino.sh` with the following content that can be run upon opening a new terminal or rebooting. This will setup your proper environment variables. -```sh -export IOCL_BOARD_PACKAGE_ROOT=/opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2 -export AOCL_BOARD_PACKAGE_ROOT=/opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2 -export QUARTUS_DIR=/home//intelFPGA/18.1/qprogrammer -export QUARTUS_ROOTDIR=/home//intelFPGA/18.1/qprogrammer -export INTELFPGAOCLSDKROOT=/opt/altera/aocl-pro-rte/aclrte-linux64 -source $INTELFPGAOCLSDKROOT/init_opencl.sh -export PATH=$PATH:$INTELFPGAOCLSDKROOT/host/linux64/bin:$QUARTUS_ROOTDIR/bin -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -source /opt/intel/openvino/bin/setupvars.sh -``` - -6. Source the script. (This assumes you already have installed the Intel® FPGA Runtime Environment for OpenCL Linux x86-64 Pro Edition 19.1) -```sh -source /home//init_openvino.sh -``` - -7. Uninstall any previous BSP before installing the OpenCL BSP for the 2020.4 BSP. Enter **Y** when prompted to uninstall (Enter sudo credentials when prompted): -```sh -aocl uninstall -``` - -8. Install the new BSP. Enter **Y** when prompted to install (Enter sudo credentials when prompted): -```sh -aocl install -``` - -9. Set up the USB Blaster: - - 1. Connect the cable between the board and the host system. Use the letter codes in the diagram below for the connection points: - - 2. Connect the B end of the cable to point B on the board. - - 3. Connect the F end of the cable to point F on the FPGA download cable. - - 4. From point F end of the cable to point F on the FPGA download cable, the connection is as shown: -![](../img/VisionAcceleratorJTAG.png) - -10. Run `jtagconfig` to ensure that your Intel FPGA Download Cable driver is ready to use: -```sh -jtagconfig -``` -Your output is similar to: -```sh -1) USB-Blaster [1-6] -02E660DD 10AX115H1(.|E2|ES)/10AX115H2/.. -``` -or: -```sh -1) USB-BlasterII [3-3] - 02E660DD 10AX115H1(.|E2|ES)/10AX115H2/.. -``` - -11. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored. -```sh -jtagconfig --setparam 1 JtagClock 6M -``` - -12. (OPTIONAL) Confirm the clock is set to 6M: -```sh -jtagconfig --getparam 1 JtagClock -``` -You should see the following: -```sh -6M -``` - -13. Go to `/opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2/bringup`, where `sg2_boardtest_2ddr_base.sof`is located: -```sh -cd /opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2/bringup -``` - -14. Program the new sof file to the board: -```sh -quartus_pgm -c 1 -m JTAG -o "p;sg2_boardtest_2ddr_base.sof" -``` - -15. Soft reboot: -```sh -reboot -``` - -16. Source the environment variable script you made. -```sh -source /home//init_openvino.sh -``` - -17. 
Run `aocl diagnose`: -```sh -aocl diagnose -``` -Your screen displays `DIAGNOSTIC_PASSED`. - -> **NOTE**: at this point if you do not want to flash the FPGA Card you can go to "Program a Bitstream" - -### Steps to Flash the FPGA Card - -> **NOTE**: -> - To avoid having to reprogram the board after a power down, a bitstream will be programmed to permanent memory on the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA. This will take about 20 minutes. -> - The steps can be followed below in this guide to do this. - -18. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored. -```sh -jtagconfig --setparam 1 JtagClock 6M -``` - -19. Check if $QUARTUS_ROOTDIR/linux64/perl/bin exists -```sh -ls $QUARTUS_ROOTDIR/linux64/perl/bin -``` - -20. If you see message "ls: cannot access /home//intelFPGA/18.1/qprogrammer/linux64/perl/bin: No such file or directory" create perl/bin directory and a symbolic link to perl -```sh -mkdir -p $QUARTUS_ROOTDIR/linux64/perl/bin -ln -s /usr/bin/perl $QUARTUS_ROOTDIR/linux64/perl/bin/perl -``` - -21. If you see message "perl" go to the next step - -22. Go to `/opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2/bringup`, where `sg2_boardtest_2ddr_top.aocx` is located: -```sh -cd /opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/BSP/a10_1150_sg2/bringup -``` - -23. Program the `sg2_boardtest_2ddr_top.aocx` file to the flash to be made permanently available even after power cycle: -```sh -sudo su -aocl flash acl0 sg2_boardtest_2ddr_top.aocx -``` -> **NOTE**: You will need the USB Blaster for this. - -24. Hard reboot the host system including powering off. - -25. Source the environment variable script you made. -```sh -source /home//init_openvino.sh -``` - -26. Check if the host system recognizes the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA board. Confirm you can detect the PCIe card: -```sh -lspci | grep -i Altera -``` -Your output is similar to: -```sh -01:00.0 Processing accelerators: Altera Corporation Device 2494 (rev 01) -``` - -27. Run `aocl diagnose`: -```sh -aocl diagnose -``` -You should see `DIAGNOSTIC_PASSED` before proceeding to the next steps. - -## 2. Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model that you used the Model Optimizer to convert in the steps before. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions of the Intel® Distribution of OpenVINO™ toolkit. For example, you cannot use the `2019R4_PL2_FP11_AlexNet_GoogleNet_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2020-2_PL2_FP11_AlexNet_GoogleNet_Generic` bitstream. - -Depending on how many bitstreams you selected, there are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package: - -1. For the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG2, the pre-trained bitstreams are in `/opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/`. This example uses a SqueezeNet bitstream with low precision for the classification sample. - -2. Source the environment variable script you made. 
-```sh -source /home//init_openvino.sh -``` - -3. Change to your home directory: -```sh -cd /home/ -``` - -4. Program the bitstream for the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG2: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_vision_design_sg2_bitstreams/2020-4_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx -``` - -## 3. Set up a Sample Neural Network Model for FPGA - -> **NOTE**: The SqueezeNet Caffe* model was already downloaded and converted to an FP16 IR when you ran the Image Classification Verification Script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). Read this section only if you want to convert the model manually, otherwise skip and go to the next section to run the Image Classification sample application. - -In this section, you will create an FP16 model suitable for hardware accelerators. For more information, see the [FPGA plugin](../IE_DG/supported_plugins/FPGA.md) section in the Inference Engine Developer Guide. - - -1. Create a directory for the FP16 SqueezeNet Model: -```sh -mkdir ~/squeezenet1.1_FP16 -``` - -2. Go to `~/squeezenet1.1_FP16`: -```sh -cd ~/squeezenet1.1_FP16 -``` - -3. Use the Model Optimizer to convert the FP16 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). To convert, run the Model Optimizer script with the following arguments: -```sh -python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model ~/openvino_models/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir . -``` - -4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to the your optimized model location: -```sh -cp /opt/intel/openvino/deployment_tools/demo/squeezenet1.1.labels . -``` - -5. Copy a sample image to the release directory. You will use this with your optimized model: -```sh -cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples_build/intel64/Release -``` - -## 4. Run the Image Classification Sample Application - -In this section you will run the Image Classification sample application, with the Caffe* Squeezenet1.1 model on your Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA. - -Image Classification sample application binary file was automatically built and the FP16 model IR files are created when you ran the Image Classification Verification Script while [installing the Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support](installing-openvino-windows-fpga.md): -* Compiled sample Application binaries are located in the `~/inference_engine_samples_build/intel64/Release` directory. -* Generated IR files are in the `~/openvino_models/ir/public/squeezenet1.1/FP16/` directory. - - -1. Go to the samples directory -```sh -cd ~/inference_engine_samples_build/intel64/Release -``` - -2. Use an Inference Engine sample to run a sample inference on the CPU: -```sh -./classification_sample_async -i car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -``` -Note the CPU throughput in Frames Per Second (FPS). 
This tells you how quickly the inference is done on the hardware. Now run the inference using the FPGA.
-
-3. Add the `-d` option to target the FPGA:
-```sh
-./classification_sample_async -i car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d HETERO:FPGA,CPU
-```
-The throughput on FPGA is listed and may show a lower FPS. This may be due to the initialization time. To account for that, increase the number of iterations or batch size when deploying to get a better sense of the speed the FPGA can run inference at.
-
-Congratulations, you are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. To learn more about how the Intel® Distribution of OpenVINO™ toolkit works, the Hello World tutorial and other resources are provided below.
-
-## Hello World Face Detection Tutorial
-
-Use the [Intel® Distribution of OpenVINO™ toolkit with FPGA Hello World Face Detection Exercise](https://github.com/fritzboyle/openvino-with-fpga-hello-world-face-detection) to learn more about how the software and hardware work together.
-
-## Additional Resources
-
-* Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit).
-* Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org).
-* [Inference Engine FPGA plugin documentation](../IE_DG/supported_plugins/FPGA.md).
+For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_VisionAcceleratorFPGA_Configure.html) and lower.
\ No newline at end of file
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md
index 218bfddb9bdd65..c0c615c0bb8db8 100644
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md
+++ b/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md
@@ -1,113 +1,21 @@
 # Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2 (IEI's Mustang-F100-A10) on Windows* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_Windows}
-> **NOTE**: Intel® Arria® 10 FPGA (Mustang-F100-A10) Speed Grade 1 is not available in the OpenVINO 2020.3 package.
+## Product Change Notice
+Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA
 
-## 1. Configure and Set Up the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA
+Change Notice Begins: July 2020
+Change Date: October 2020
-1. Download [Intel® Quartus® Prime Programmer and Tools Standard Edition 18.1](http://fpgasoftware.intel.com/18.1/?edition=standard&platform=windows&download_manager=direct#tabs-4). Install the Intel® Quartus® Prime Programmer and Tools Software to the `C:\intelFPGA\18.1` directory. +Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. -2. Download [OpenSSL](http://slproweb.com/download/Win64OpenSSL_Light-1_1_1f.exe). Install the OpenSSL and add the `\bin` path to your system `PATH` variable. +Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -3. Unpack the BSP for your Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2: -Extract `Intel_vision_accel_win_driver_1.2_SG2.zip` from `C:\Program Files (x86)\IntelSWTools\openvino\a10_vision_design_sg2_bitstreams\BSP` to `C:\intelFPGA\19.2\aclrte-windows64\board` -5. Open an admin command prompt. -6. Setup your environment variables: -```sh -set INTELFPGAOCLSDKROOT=C:\intelFPGA\19.2\aclrte-windows64 -set AOCL_BOARD_PACKAGE_ROOT=%INTELFPGAOCLSDKROOT%\board\a10_1150_sg2 -set IOCL_BOARD_PACKAGE_ROOT=%INTELFPGAOCLSDKROOT%\board\a10_1150_sg2 -C:\intelFPGA\19.2\aclrte-windows64\init_opencl.bat -"C:\Program Files (x86)\IntelSWTools\openvino\bin\setupvars.bat" -``` -7. Uninstall any previous BSP before installing the OpenCL BSP for the 2020.3 BSP. Enter **Y** when prompted to uninstall: -```sh -aocl uninstall -``` -8. Install the new BSP. Enter **Y** when prompted to install -```sh -aocl install -``` -9. Run `aocl diagnose`: -```sh -aocl diagnose -``` -Your screen displays `DIAGNOSTIC_PASSED`. - -## 2. Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model that you used the Model Optimizer to convert in the steps before. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions of the Intel® Distribution of OpenVINO™ toolkit. For example, you cannot use the `2019R4_PL2_FP11_AlexNet_GoogleNet_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2020-3_PL2_FP11_AlexNet_GoogleNet_Generic` bitstream. - -Depending on how many bitstreams you selected, there are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package: - -1. For the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG2, the pre-trained bitstreams are in `C:\Program Files (x86)\IntelSWTools\openvino\a10_vision_design_sg2_bitstreams`. 
This example uses a SqueezeNet bitstream with low precision for the classification sample. - -2. Program the bitstream for the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG2: -```sh -aocl program acl0 "C:\Program Files (x86)\IntelSWTools\openvino\a10_vision_design_sg2_bitstreams/2020-3_PL2_FP16_SqueezeNet_TinyYolo_VGG.aocx" -``` - -## 3. Set up a Sample Neural Network Model for FPGA - -> **NOTE**: The SqueezeNet Caffe* model was already downloaded and converted to an FP16 IR when you ran the Image Classification Verification Script while [installing the Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support](installing-openvino-windows-fpga.md). Read this section only if you want to convert the model manually, otherwise skip and go to the next section to run the Image Classification sample application. - -In this section, you will prepare a sample FP16 model suitable for hardware accelerators. For more information, see the [FPGA plugin](../IE_DG/supported_plugins/FPGA.html) section in the Inference Engine Developer Guide. - -1. Create a directory for the FP16 SqueezeNet Model: -```sh -mkdir %HOMEPATH%\squeezenet1.1_FP16 -``` - -2. Go to `%HOMEPATH%\squeezenet1.1_FP16`: -```sh -cd %HOMEPATH%\squeezenet1.1_FP16 -``` - -3. Use the Model Optimizer to convert the FP16 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support](installing-openvino-windows-fpga.md). To convert, run the Model Optimizer script with the following arguments: -```sh -python "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\mo.py" --input_model %HOMEPATH%\Documents\Intel\OpenVINO\openvino_models\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir . -``` - -4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to the your optimized model location: -```sh -xcopy "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\squeezenet1.1.labels" . -``` - -5. Copy a sample image to the release directory. You will use this with your optimized model: -```sh -xcopy "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png" %HOMEPATH%\Documents\Intel\OpenVINO\inference_engine_samples_build\intel64\Release -``` - -## 4. Run the Image Classification Sample Application - -In this section you will run the Image Classification sample application, with the Caffe* Squeezenet1.1 model on your Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA. - -Image Classification sample application binary file was automatically built and the FP16 model IR files are created when you ran the Image Classification Verification Script while [installing the Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support](installing-openvino-windows-fpga.md): -* Compiled sample Application binaries are located in the `%HOMEPATH%\Documents\Intel\OpenVINO\inference_engine_samples_build\intel64\Release` folder. -* Generated IR files are in the `%HOMEPATH%\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16` folder. - -1. Go to the samples directory -```sh -cd %HOMEPATH%\Documents\Intel\OpenVINO\inference_engine_samples_build\intel64\Release -``` - -2. 
Use an Inference Engine sample to run a sample inference on the CPU: -```sh -classification_sample_async -i car.png -m %HOMEPATH%\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml -``` -Note the CPU throughput in Frames Per Second (FPS). This tells you how quickly the inference is done on the hardware. Now run the inference using the FPGA. - -3. Add the `-d` option to target the FPGA: -```sh -classification_sample_async -i car.png -m %HOMEPATH%\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml -d HETERO:FPGA,CPU -``` -The throughput on FPGA is listed and may show a lower FPS. This may be due to the initialization time. To account for that, increase the number of iterations or batch size when deploying to get a better sense of the speed the FPGA can run inference at. - -Congratulations, you are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. To learn more about how the Intel® Distribution of OpenVINO™ toolkit works, try the other resources that are provided below. - -## Additional Resources - -* Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit). -* Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org). -* [Inference Engine FPGA plugin documentation](../IE_DG/supported_plugins/FPGA.md). +For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_Windows.html) and lower. \ No newline at end of file diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md index 46ed7dc29a871c..5353d70760404e 100644 --- a/docs/install_guides/installing-openvino-docker-linux.md +++ b/docs/install_guides/installing-openvino-docker-linux.md @@ -284,45 +284,12 @@ docker run --rm --net=host -v /var/tmp:/var/tmp –ipc=host -ti > - Alternatively, you can start hddldaemon with the root user on host, but this approach is not recommended. ## Use a Docker* Image for FPGA -### Build a Docker* Image for FPGA -FPGA card is not available in container by default, but it can be mounted there with the following pre-requisites: -- FPGA device is up and ready to run inference. -- FPGA bitstreams were pushed to the device over PCIe. +Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. -To build a Docker* image for FPGA: +Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -1. Set additional environment variables in the `Dockerfile`:
-```sh -ENV CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -ENV DLA_AOCX=/opt/intel/openvino/a10_devkit_bitstreams/2-0-1_RC_FP11_Generic.aocx -ENV PATH=/opt/altera/aocl-pro-rte/aclrte-linux64/bin:$PATH -``` -2. Install the following UDEV rule:
-```sh -cat < fpga.rules -KERNEL=="acla10_ref*",GROUP="users",MODE="0660" -EOF -sudo cp fpga.rules /etc/udev/rules.d/ -sudo udevadm control --reload-rules -sudo udevadm trigger -sudo ldconfig -``` -Make sure that a container user is added to the "users" group with the same GID as on host. - -### Run the Docker* container for FPGA - -To run the built Docker* container for FPGA, use the following command: - -```sh -docker run --rm -it \ ---mount type=bind,source=/opt/intel/intelFPGA_pro,destination=/opt/intel/intelFPGA_pro \ ---mount type=bind,source=/opt/altera,destination=/opt/altera \ ---mount type=bind,source=/etc/OpenCL/vendors,destination=/etc/OpenCL/vendors \ ---mount type=bind,source=/opt/Intel/OpenCL/Boards,destination=/opt/Intel/OpenCL/Boards \ ---device /dev/acla10_ref0:/dev/acla10_ref0 \ - -``` +For instructions for previous releases with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_docker_linux.html#use_a_docker_image_for_fpga) or lower. ## Examples * [ubuntu18_runtime dockerfile](https://docs.openvinotoolkit.org/downloads/ubuntu18_runtime.dockerfile) - Can be used to build OpenVINO™ runtime image containing minimal dependencies needed to use OpenVINO™ in production environment. diff --git a/docs/install_guides/installing-openvino-linux-fpga.md b/docs/install_guides/installing-openvino-linux-fpga.md index b0907b9c75c945..53561ce96a311f 100644 --- a/docs/install_guides/installing-openvino-linux-fpga.md +++ b/docs/install_guides/installing-openvino-linux-fpga.md @@ -1,16 +1,5 @@ # Install Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support {#openvino_docs_install_guides_installing_openvino_linux_fpga} -**NOTES**: -- [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). -- The Intel® Distribution of OpenVINO™ toolkit was formerly known as the Intel® Computer Vision SDK. -- These steps apply to Ubuntu\*, CentOS\*, and Yocto\*. -- If you are using Intel® Distribution of OpenVINO™ toolkit on Windows\* OS, see the [Installation Guide for Windows*](installing-openvino-windows.md). -- For the Intel Distribution of OpenVINO toolkit without FPGA -support, see [Installation Guide for Linux*](installing-openvino-linux.md). -- CentOS and Yocto installations will require some modifications that -are not covered in this guide. -- An internet connection is required to follow the steps in this guide. - ## Product Change Notice Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA @@ -29,323 +18,4 @@ Intel will be transitioning to the next-generation programmable deep-learning so Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. 
For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -## Introduction - -The Intel® Distribution of OpenVINO™ toolkit quickly deploys applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNN), the toolkit extends computer vision (CV) workloads across Intel® hardware, maximizing performance. The Intel® Distribution of OpenVINO™ toolkit includes the Intel® Deep Learning Deployment Toolkit (Intel® DLDT). - -The Intel® Distribution of OpenVINO™ toolkit for Linux\* with FPGA Support: - -- Enables CNN-based deep learning inference on the edge -- Supports heterogeneous execution across Intel® CPU, Intel® Integrated Graphics, Intel® FPGA, Intel® Neural Compute Stick 2 -- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels -- Includes optimized calls for computer vision standards including OpenCV\* and OpenCL™ - -**Included with the Installation and installed by default:** - -| Component | Description | -|-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) | This tool imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. 
Popular frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. | -| [Inference Engine](../IE_DG/inference_engine_intro.md) | This is the engine that runs the deep learning model. It includes a set of libraries for an easy inference integration into your applications. | -| Drivers and runtimes for OpenCL™ version 2.1 | Enables OpenCL on the GPU/CPU for Intel® processors | -| Intel® Media SDK | Offers access to hardware accelerated video codecs and frame processing | -| Pre-compiled FPGA bitstream samples | Pre-compiled bitstream samples for the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA, and Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA SG2. | -| Intel® FPGA SDK for OpenCL™ software technology | The Intel® FPGA RTE for OpenCL™ provides utilities, host runtime libraries, drivers, and RTE-specific libraries and files | -| [OpenCV](https://docs.opencv.org/master/) | OpenCV\* community version compiled for Intel® hardware | -| [Inference Engine Code Samples](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to utilize specific OpenVINO capabilities in an application and how to perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more. | -| [Demo Applications](@ref omz_demos_README) | A set of simple console applications that provide robust application templates to help you implement specific deep learning scenarios. | - - -## Development and Target Platform - -The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use. - -**Hardware** - -* 6th to 10th generation Intel® Core™ processors and Intel® Xeon® processors -* Intel® Xeon® processor E family (formerly code named Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) -* 3rd generation Intel® Xeon® Scalable processor (formerly code named Cooper Lake) -* Intel® Xeon® Scalable processor (formerly Skylake and Cascade Lake) -* Intel Atom® processor with support for Intel® Streaming SIMD Extensions 4.1 (Intel® SSE4.1) -* Intel Pentium® processor N4200/5, N3350/5, or N3450/5 with Intel® HD Graphics -* Intel® Neural Compute Stick 2 -* Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -* Intel® Programmable Acceleration Card (PAC) with Intel® Arria® 10 GX FPGA -* Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Mustang-F100-A10) SG2 - -> **NOTE**: With OpenVINO™ 2020.4 release, Intel® Movidius™ Neural Compute Stick is no longer supported. - -> **NOTE**: Intel® Arria 10 FPGA (Mustang-F100-A10) SG1 is no longer supported. If you use Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA (Mustang-F100-A10) Speed Grade 1, we recommend continuing to use the [Intel® Distribution of OpenVINO™ toolkit 2020.1](https://docs.openvinotoolkit.org/2020.1/_docs_install_guides_VisionAcceleratorFPGA_Configure.html) release. - -> **NOTE**: Intel® Arria® 10 GX FPGA Development Kit is no longer supported. For the Intel® Arria® 10 FPGA GX Development Kit configuration guide, refer to the [2019 R1.1 documentation](http://docs.openvinotoolkit.org/2019_R1.1/_docs_install_guides_GX_Configure_2019R1.html). - -**Processor Notes:** - -- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor. -- A chipset that supports processor graphics is required for Intel® Xeon® processors. 
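If you want to double-check these requirements on the machine you are installing to, a quick look at `/proc/cpuinfo` and the running kernel is usually enough. The snippet below is only an informal sketch that uses standard Linux tools; it is not part of the official installation steps:
```sh
# Informal host pre-check against the hardware requirements listed above.
grep -m1 "model name" /proc/cpuinfo   # CPU model, to compare with the supported processor families
uname -r                              # running kernel, to compare with the minimum in the Operating Systems list below
if grep -q sse4_1 /proc/cpuinfo; then # SSE4.1 is required for the Intel Atom(R) processors listed above
    echo "SSE4.1: supported"
else
    echo "SSE4.1: not detected"
fi
```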
- -**Operating Systems:** - -- Ubuntu 18.04 or 16.04 long-term support (LTS), 64-bit: Minimum supported kernel is 4.15 -- CentOS 7.4, 64-bit -- Yocto Project v3.0, 64-bit (for target only and requires modifications) - -## Overview - -This guide provides step-by-step instructions on how to install the Intel® Distribution of OpenVINO™ toolkit with FPGA Support. Links are provided for each type of compatible hardware including downloads, initialization and configuration steps. The following steps will be covered: - -1. Install the Intel® Distribution of OpenVINO™ Toolkit -2. Install External software dependencies -3. Configure the Model Optimizer -4. Run the Verification Scripts to Verify Installation and Compile Samples -5. Install your compatible hardware from the list of supported hardware
-6. Use the Face Detection Tutorial - -## Install the Intel® Distribution of OpenVINO™ Toolkit Core Components - -Download the Intel® Distribution of OpenVINO™ toolkit package file from [Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](https://software.intel.com/en-us/openvino-toolkit/choose-download). -Select the Intel® Distribution of OpenVINO™ toolkit for Linux with FPGA Support package from the dropdown menu. - -1. Open a command prompt terminal window. -2. Change directories to where you downloaded the Intel Distribution of -OpenVINO toolkit for Linux\* with FPGA Support package file.
- If you downloaded the package file to the current user's `Downloads` - directory: -```sh -cd ~/Downloads/ -``` -By default, the file is saved as `l_openvino_toolkit_fpga_p_.tgz`. - -3. Unpack the .tgz file: -```sh -tar -xvzf l_openvino_toolkit_fpga_p_.tgz -``` -The files are unpacked to the `l_openvino_toolkit_fpga_p_` directory. - -4. Go to the `l_openvino_toolkit_fpga_p_` directory: -```sh -cd l_openvino_toolkit_fpga_p_ -``` -If you have a previous version of the Intel Distribution of OpenVINO toolkit installed, rename or delete these two directories: -- `/home//inference_engine_samples` -- `/home//openvino_models` - -**Installation Notes:** -- Choose an installation option and run the related script as root. -- You can use either a GUI installation wizard or command-line instructions (CLI). -- Screenshots are provided for the GUI, but not for CLI. The following information also applies to CLI and will be helpful to your installation where you will be presented with the same choices and tasks. - -5. Choose your installation option: - - **Option 1:** GUI Installation Wizard: -```sh -sudo ./install_GUI.sh -``` - - **Option 2:** Command-Line Instructions: -```sh -sudo ./install.sh -``` -6. Follow the instructions on your screen. Watch for informational -messages such as the following in case you must complete additional -steps: -![](../img/install-linux-fpga-01.png) - -7. If you select the default options, the **Installation summary** GUI screen looks like this: -![](../img/install-linux-fpga-02.png) - - **Optional:** You can choose **Customize** and select only the bitstreams for your card. This will allow you to minimize - the size of the download by several gigabytes. - - The following bitstreams listed at the bottom of the customization screen are highlighted below. Choose the one for your FPGA: - ![](../img/install-linux-fpga-04.png) - - When installed as **root** the default installation directory for the Intel Distribution of OpenVINO is - `/opt/intel/openvino_fpga_2019./`.
- For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino/`. - -8. A Complete screen indicates that the core components have been installed: -![](../img/install-linux-fpga-05.png) - -The first core components are installed. Continue to the next section to install additional dependencies. - -## Install External Software Dependencies - -These dependencies are required for: - -- Intel-optimized build of OpenCV library -- Deep Learning Inference Engine -- Deep Learning Model Optimizer tools - -1. Change to the `install_dependencies` directory: -```sh -cd /opt/intel/openvino/install_dependencies -``` -2. Run a script to download and install the external software dependencies: -```sh -sudo -E ./install_openvino_dependencies.sh -``` - -The dependencies are installed. Continue to the next section to configure the Model Optimizer. - -## Configure the Model Optimizer - -The Model Optimizer is a Python\*-based command line tool for importing -trained models from popular deep learning frameworks such as Caffe\*, -TensorFlow\*, Apache MXNet\*, ONNX\* and Kaldi\*. - -The Model Optimizer is a key component of the Intel Distribution of -OpenVINO toolkit. You cannot perform inference on your trained model without -running the model through the Model Optimizer. When you run a -pre-trained model through the Model Optimizer, your output is an -Intermediate Representation (IR) of the network. The Intermediate -Representation is a pair of files that describe the whole model: - -- `.xml`: Describes the network topology -- `.bin`: Contains the weights and biases binary data - -For more information about the Model Optimizer, refer to the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).  - -### Model Optimizer Configuration Steps - -> **IMPORTANT**: The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your environment. - -You can choose to either configure all supported frameworks at once **OR** configure one framework at a time. Choose the option that best suits your needs. If you see error messages, make sure you installed all dependencies. - -> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ to the non-default install directory, replace `/opt/intel` with the directory in which you installed the software. - -**Option 1: Configure all supported frameworks at the same time** - -1. Go to the Model Optimizer prerequisites directory: -```sh -cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites -``` -2. Run the script to configure the Model Optimizer for Caffe, - TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: -```sh -sudo ./install_prerequisites.sh -``` - -**Option 2: Configure each framework separately** - -Configure individual frameworks separately **ONLY** if you did not select **Option 1** above. -1. Go to the Model Optimizer prerequisites directory: -```sh -cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites -``` -2. Run the script for your model framework. 
You can run more than one script: -- For **Caffe**: - -```sh -sudo ./install_prerequisites_caffe.sh -``` -- For **TensorFlow 1.x**: -```sh -sudo ./install_prerequisites_tf.sh -``` -- For **TensorFlow 2.x**: -```sh -sudo ./install_prerequisites_tf2.sh -``` -- For **MXNet**: -```sh -sudo ./install_prerequisites_mxnet.sh -``` -- For **ONNX**: -```sh -sudo ./install_prerequisites_onnx.sh -``` -- For **Kaldi**: -```sh -sudo ./install_prerequisites_kaldi.sh -``` -The Model Optimizer is configured for one or more frameworks. - -You are ready to compile the samples by running the verification scripts. - -## Run the Verification Scripts to Verify Installation and Compile Samples - -To verify the installation and compile two samples, run the verification applications provided with the product on the CPU: - -1. Go to the **Inference Engine demo** directory: -```sh -cd /opt/intel/openvino/deployment_tools/demo -``` - -2. Run the **Image Classification verification script**: -```sh -./demo_squeezenet_download_convert_run.sh -``` -This verification script downloads a SqueezeNet model, uses the Model Optimizer to convert the model to the .bin and .xml Intermediate Representation (IR) files. The Inference Engine requires this model conversion so it can use the IR as input and achieve optimum performance on Intel hardware.
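As a side note, if you later need to repeat the download-and-convert part of this step by hand (for example, for another model), the commands are roughly the following. This is only a sketch: the Open Model Zoo downloader path and the output directories are assumptions based on the default installation layout used elsewhere in this guide.
```sh
# Rough manual equivalent of the download-and-convert work that
# demo_squeezenet_download_convert_run.sh performs (default root install assumed).
source /opt/intel/openvino/bin/setupvars.sh

# Download the public SqueezeNet v1.1 Caffe model with the Open Model Zoo downloader.
python3 /opt/intel/openvino/deployment_tools/tools/model_downloader/downloader.py \
    --name squeezenet1.1 --output_dir ~/openvino_models/models

# Convert it to an FP16 Intermediate Representation with the Model Optimizer.
python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py \
    --input_model ~/openvino_models/models/public/squeezenet1.1/squeezenet1.1.caffemodel \
    --data_type FP16 \
    --output_dir ~/openvino_models/ir/public/squeezenet1.1/FP16
```
The generated `.xml` and `.bin` files should then match the IR locations referenced by the sample commands in this guide.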
-This verification script builds the [Image Classification Sample Async](../../inference-engine/samples/classification_sample_async/README.md) application and run it with the `car.png` image in the demo directory. When the verification script completes, you will have the label and confidence for the top-10 categories: -![](../img/image_classification_script_output_lnx.png) - -3. Run the **Inference Pipeline verification script**: -```sh -./demo_security_barrier_camera.sh -``` -This verification script builds the [Security Barrier Camera Demo](@ref omz_demos_security_barrier_camera_demo_README) application included in the package. - - This verification script uses the `car_1.bmp` image in the demo directory to show an inference pipeline using three of the pre-trained models. The verification script uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute. - - First, an object is identified as a vehicle. This identification is used as input to the next model, which identifies specific vehicle attributes, including the license plate. Finally, the attributes identified as the license plate are used as input to the third model, which recognizes specific characters in the license plate. - - When the verification script completes, you will see an image that displays the resulting frame with detections rendered as bounding boxes, and text: - ![](../img/security-barrier-results.png) - -4. Close the image viewer window to complete the verification script. - - -To learn about the verification scripts, see the `README.txt` file in `/opt/intel/openvino/deployment_tools/demo`. - -For a description of the Intel Distribution of OpenVINO™ pre-trained object detection and object recognition models, see [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index). - -You have completed all required installation, configuration and build steps in this guide to use your CPU to work with your trained models. To use other hardware, see Install and Configure your Compatible Hardware below. - -## Install and Configure Your Compatible Hardware - -Install your compatible hardware from the list of supported components below. - -> **NOTE**: Once you've completed your hardware installation, you'll return to this guide to finish installation and configuration of the Intel® Distribution of OpenVINO™ toolkit. - -Links to install and configure compatible hardware -- [The Intel® Programmable Acceleration Card (PAC) with Intel® Arria® 10 GX FPGA](PAC_Configure.md) -- [The Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA SG2 (Mustang-F100-A10)](VisionAcceleratorFPGA_Configure.md) -- [Intel® Vision Accelerator Design with Intel® Movidius™ VPUs](installing-openvino-linux-ivad-vpu.md) - -Congratulations, you have finished the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. To learn more about how the Intel® Distribution of OpenVINO™ toolkit works, the Hello World tutorial and other resources are provided below. - -## Hello World Face Detection Tutorial - -Refer to the [OpenVINO™ with FPGA Hello World Face Detection Exercise](https://github.com/intel-iot-devkit/openvino-with-fpga-hello-world-face-detection). - -## Troubleshooting - -PRC developers might encounter pip installation related issues during OpenVINO™ installation. To resolve the issues, you may use one of the following options at your discretion: -* Add the download source with `-i` parameter in the `pip` command. 
For example: -``` -pip install numpy.py -i https://mirrors.aliyun.com/pypi/simple/ -``` -Use the `--trusted-host` parameter if the URL above is `http` instead of `https`. - -* Modify or create `~/.pip/pip.conf` file to change the default download source with the content below: -``` -[global] -index-url = http://mirrors.aliyun.com/pypi/simple/ -[install] -trusted-host = mirrors.aliyun.com -``` - -**Additional Resources** - -- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) -- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org) -- [Inference Engine FPGA plugin documentation](../IE_DG/supported_plugins/FPGA.md) -- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). -- For more information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). -- To learn about pre-trained models for OpenVINO™ toolkit, see the [Pre-Trained Models Overview](@ref omz_models_intel_index). -- For information on Inference Engine Tutorials, see the [Inference Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic) -- For IoT Libraries & Code Samples see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit). - -To learn more about converting models, go to: - -- [Convert Your Caffe* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md) -- [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) -- [Convert Your MXNet* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md) -- [Convert Your ONNX* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md) - - +For installation instructions for the last release of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_linux_fpga.html). \ No newline at end of file diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index b300b9866ddc3b..c4088aac99fd4e 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -3,7 +3,6 @@ > **NOTES**: > - These steps apply to Ubuntu\*, CentOS\*, and Yocto\*. > - If you are using Intel® Distribution of OpenVINO™ toolkit on Windows\* OS, see the [Installation Guide for Windows*](installing-openvino-windows.md). -> - For the Intel Distribution of OpenVINO toolkit with FPGA support, see [Installation Guide for Linux* with FPGA support](installing-openvino-linux-fpga.md). > - CentOS and Yocto installations will require some modifications that are not covered in this guide. > - An internet connection is required to follow the steps in this guide. > - [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). 
diff --git a/docs/install_guides/installing-openvino-windows-fpga.md b/docs/install_guides/installing-openvino-windows-fpga.md index 3a76f477e5847c..dbe44200f494a8 100644 --- a/docs/install_guides/installing-openvino-windows-fpga.md +++ b/docs/install_guides/installing-openvino-windows-fpga.md @@ -1,12 +1,5 @@ # Install Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support {#openvino_docs_install_guides_installing_openvino_windows_fpga} -**NOTES**: -- These steps apply to Microsoft Windows 10*. -- For the Intel Distribution of OpenVINO toolkit for Windows* without FPGA -support, see [Installation Guide for Windows*](installing-openvino-windows.md). -- An internet connection is required to follow the steps in this guide. -- [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). - ## Product Change Notice Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA @@ -25,429 +18,4 @@ Intel will be transitioning to the next-generation programmable deep-learning so Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. -## Introduction - -> **IMPORTANT**: -> - All steps in this guide are required, unless otherwise stated.
-> - In addition to the download package, you must install dependencies and complete configuration steps. - -Your installation is complete when these are all completed: - -1. Install the Intel® Distribution of OpenVINO™ toolkit core components - -2. Install the dependencies: - - - [Microsoft Visual Studio* with C++ **2019 or 2017** with MSBuild](http://visualstudio.microsoft.com/downloads/) - - [CMake **2.8.12 or higher** 64-bit](https://cmake.org/download/) - > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. - - [Python **3.5**-**3.7** 64-bit](https://www.python.org/downloads/windows/) - > **IMPORTANT**: As part of this installation, make sure you click the option to add the application to your `PATH` environment variable. - -3. Set Environment Variables - -4. Configure the Model Optimizer - -5. Run two Verification Scripts to Verify Installation - -6. Install your compatible hardware from the list of supported hardware
- -7. Use the Face Detection Tutorial - -### About the Intel® Distribution of OpenVINO™ toolkit - -The Intel® Distribution of OpenVINO™ toolkit speeds the deployment of applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNN), the toolkit extends computer vision (CV) workloads across Intel® hardware to maximize performance. - -The Intel® Distribution of OpenVINO™ toolkit includes the Intel® Deep Learning Deployment Toolkit (Intel® DLDT). For more information, see the online [Intel® Distribution of OpenVINO™ toolkit Overview](https://software.intel.com/en-us/OpenVINO-toolkit) page. - -The Intel® Distribution of OpenVINO™ toolkit for Windows\* with FPGA Support: - -- Enables CNN-based deep learning inference on the edge -- Supports heterogeneous execution across Intel® CPU, Intel® Integrated Graphics, Intel® FPGA, Intel® Neural Compute Stick 2 -- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels -- Includes optimized calls for computer vision standards including OpenCV\* and OpenCL™ - -#### Included in the Installation Package - -The following components are installed by default: - -| Component | Description | -|-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) | This tool imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. 
Popular frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. | -| [Inference Engine](../IE_DG/inference_engine_intro.md) | This is the engine that runs the deep learning model. It includes a set of libraries for an easy inference integration into your applications. | -| Pre-compiled FPGA bitstream samples | Pre-compiled bitstream samples for the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA, and Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA SG2. | -| Intel® FPGA SDK for OpenCL™ software technology | The Intel® FPGA RTE for OpenCL™ provides utilities, host runtime libraries, drivers, and RTE-specific libraries and files | -| [OpenCV](https://docs.opencv.org/master/) | OpenCV\* community version compiled for Intel® hardware | -| [Inference Engine Code Samples](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to utilize specific OpenVINO capabilities in an application and how to perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more. | -| [Demo Applications](@ref omz_demos_README) | A set of simple console applications that provide robust application templates to help you implement specific deep learning scenarios. | - - -### System Requirements - -The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use. - -**Hardware** - -* 6th to 10th generation Intel® Core™ processors and Intel® Xeon® processors -* Intel® Xeon® processor E family (formerly code named Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) -* 3rd generation Intel® Xeon® Scalable processor (formerly code named Cooper Lake) -* Intel® Xeon® Scalable processor (formerly Skylake and Cascade Lake) -* Intel Atom® processor with support for Intel® Streaming SIMD Extensions 4.1 (Intel® SSE4.1) -* Intel Pentium® processor N4200/5, N3350/5, or N3450/5 with Intel® HD Graphics -* Intel® Neural Compute Stick 2 -* Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -* Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (Mustang-F100-A10) SG2 - -> **NOTE**: With OpenVINO™ 2020.4 release, Intel® Movidius™ Neural Compute Stick is no longer supported. - -> **NOTE**: With OpenVINO™ 2020.4 release, Intel® Programmable Acceleration Card (PAC) with Intel® Arria® 10 GX FPGA is no longer supported on Windows. - -**Processor Notes:** - -- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor. -- A chipset that supports processor graphics is required for Intel® Xeon® processors. - -**Operating Systems:** - -- Microsoft Windows 10*, 64-bit - -**Software** -- [Microsoft Visual Studio* with C++ **2019 or 2017** with MSBuild](http://visualstudio.microsoft.com/downloads/) -- [CMake **2.8.12 or higher** 64-bit](https://cmake.org/download/) - > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. -- [Python **3.6.5** 64-bit](https://www.python.org/downloads/release/python-365/) - -## Installation Steps - -### Install the Intel® Distribution of OpenVINO™ toolkit Core Components - -1. If you have not downloaded the Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support, [download the latest version](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/choose-download/windows-fpga.html). 
By default, the file is saved to the `Downloads` directory as `w_openvino_toolkit_fpga_p_.exe`. -Select the Intel® Distribution of OpenVINO™ toolkit for Windows with FPGA Support package from the dropdown menu. - -2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_fpga_p_.exe`. A window opens to let you choose your installation directory and components. You can also select only the bitstreams for your card. This will allow you to minimize the size of the installation by several gigabytes. The default installation directory is `C:\Program Files (x86)\IntelSWTools\openvino_`, for simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\IntelSWTools\openvino`. If you choose a different installation directory, the installer will create the directory for you. For the default options, the **Installation summary** GUI screen looks like this:: - - ![](../img/openvino-install-windows-fpga-01.png) - -3. Click **Next**. - -4. You are asked if you want to provide consent to gather information. Choose the option of your choice. Click **Next**. - -5. If you are missing external dependencies, you will see a warning screen. Write down the dependencies you are missing. **You need to take no other action at this time**. After installing the Intel® Distribution of OpenVINO™ toolkit core components, install the missing dependencies. -The screen example below indicates you are missing one dependency: - - ![](../img/openvino-install-windows-fpga-02.png) - -6. Click **Next**. - -7. When the first part of installation is complete, the final screen informs you that the core components have been installed and additional steps still required: - - ![](../img/openvino-install-windows-fpga-03.png) - -8. Click **Finish** to close the installation wizard. A new browser window opens to the next section of the installation guide to set the environment variables. You are in the same document. The new window opens in case you ran the installation without first opening this installation guide. - -9. If the installation indicated you must install dependencies, install them first. If there are no missing dependencies, you can go ahead and set the environment variables. - -### Set the Environment Variables - -> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ to the non-default install directory, replace `C:\Program Files (x86)\IntelSWTools` with the directory in which you installed the software. - -You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables: -```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\bin\ -``` - -```sh -setupvars.bat -``` - -(Optional): OpenVINO toolkit environment variables are removed when you close the Command Prompt window. As an option, you can permanently set the environment variables manually. - -The environment variables are set. Continue to the next section to configure the Model Optimizer. - -## Configure the Model Optimizer - -> **IMPORTANT**: These steps are required. You must configure the Model Optimizer for at least one framework. The Model Optimizer will fail if you do not complete the steps in this section. - -> **NOTE**: If you see an error indicating Python is not installed when you know you installed it, your computer might not be able to find the program. 
For the instructions to add Python to your system environment variables, see Update Your Windows Environment Variables. - -The Model Optimizer is a key component of the Intel® Distribution of OpenVINO™ toolkit. You cannot do inference on your trained model without running the model through the Model Optimizer. When you run a pre-trained model through the Model Optimizer, your output is an Intermediate Representation (IR) of the network. The IR is a pair of files that describe the whole model: - -- `.xml`: Describes the network topology -- `.bin`: Contains the weights and biases binary data - -The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware. - -The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use. - -This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page. - -For more information about the Model Optimizer, see the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). - - -### Model Optimizer Configuration Steps - -You can configure the Model Optimizer either for all supported frameworks at once or for one framework at a time. Choose the option that best suits your needs. If you see error messages, make sure you installed all dependencies. - -> **IMPORTANT**: The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your environment. - -> **NOTE**: -> In the steps below: -> - If you you want to use the Model Optimizer from another installed versions of Intel® Distribution of OpenVINO™ toolkit installed, replace `openvino` with `openvino_`. -> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\IntelSWTools` with the directory where you installed the software. - -These steps use a command prompt to make sure you see error messages. - -#### Option 1: Configure the Model Optimizer for all supported frameworks at the same time: - -1. Open a command prompt. To do so, type `cmd` in your **Search Windows** box and then press **Enter**. -Type commands in the opened window: - - ![](../img/command_prompt.PNG) - -2. Go to the Model Optimizer prerequisites directory.
-```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\install_prerequisites -``` - -3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:
-```sh -install_prerequisites.bat -``` - -#### Option 2: Configure the Model Optimizer for each framework separately: - -1. Go to the Model Optimizer prerequisites directory:
-```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\install_prerequisites -``` - -2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one: - - * For **Caffe**:
- ```sh - install_prerequisites_caffe.bat - ``` - - * For **TensorFlow 1.x**:
- ```sh - install_prerequisites_tf.bat - ``` - - * For **TensorFlow 2.x**:
- ```sh - install_prerequisites_tf2.bat - ``` - - * For **MXNet**:
- ```sh - install_prerequisites_mxnet.bat - ``` - - * For **ONNX**: - ```sh - install_prerequisites_onnx.bat - ``` - - * For **Kaldi**: - ```sh - install_prerequisites_kaldi.bat - ``` - -The Model Optimizer is configured for one or more frameworks. Success is indicated by a screen similar to this: - -![](../img/Configure-MO.PNG) - -You are ready to use two short demos to see the results of running the Intel Distribution of OpenVINO toolkit and to verify your installation was successful. The demo scripts are required since they perform additional configuration steps. Continue to the next section. - -If you want to use a GPU or VPU, or update your Windows* environment variables, read through the Optional Steps section. - - -## Use Verification Scripts to Verify Your Installation - -> **IMPORTANT**: This section is required. In addition to confirming your installation was successful, demo scripts perform other steps, such as setting up your computer to use the Inference Engine samples. - -> **NOTE**: -> The paths in this section assume you used the default installation directory. If you used a directory other than `C:\Program Files (x86)\IntelSWTools`, update the directory with the location where you installed the software. -To verify the installation and compile two samples, run the verification applications provided with the product on the CPU: - -1. Open a command prompt window. - -2. Go to the Inference Engine demo directory:
- ```sh - cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\ - ``` - -3. Run the verification scripts by following the instructions in the next section. - - -### Run the Image Classification Verification Script - -To run the script, start the `demo_squeezenet_download_convert_run.bat` file: -```sh -demo_squeezenet_download_convert_run.bat -``` - -This script downloads a SqueezeNet model, uses the Model Optimizer to convert the model to the `.‍bin` and `.‍xml` Intermediate Representation (IR) files. The Inference Engine requires this model conversion so it can use the IR as input and achieve optimum performance on Intel hardware.
-This verification script builds the [Image Classification Sample Async](../../inference-engine/samples/classification_sample_async/README.md) application and run it with the `car.png` image in the demo directory. For a brief description of the Intermediate Representation, see Configuring the Model Optimizer. - -When the verification script completes, you will have the label and confidence for the top-10 categories: -![](../img/image_classification_script_output_win.png) - -This demo is complete. Leave the console open and continue to the next section to run the Inference Pipeline demo. - - -### Run the Inference Pipeline Verification Script - -To run the script, start the `demo_security_barrier_camera.bat` file while still in the console: -```sh -demo_security_barrier_camera.bat -``` - -This script downloads three pre-trained model IRs, builds the [Security Barrier Camera Demo](@ref omz_demos_security_barrier_camera_demo_README) application, and runs it with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline. The verification script uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute. - -First, an object is identified as a vehicle. This identification is used as input to the next model, which identifies specific vehicle attributes, including the license plate. Finally, the attributes identified as the license plate are used as input to the third model, which recognizes specific characters in the license plate. - -When the demo completes, you have two windows open: - - * A console window that displays information about the tasks performed by the demo - * An image viewer window that displays a resulting frame with detections rendered as bounding boxes, similar to the following: - - ![](../img/inference_pipeline_script_win.png) - -Close the image viewer window to end the demo. - -To learn more about the verification scripts, see `README.txt` in `C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo`. - -For detailed description of the OpenVINO™ pre-trained object detection and object recognition models, see the [Overview of OpenVINO™ toolkit Pre-Trained Models](@ref omz_models_intel_index) page. - -In this section, you saw a preview of the Intel® Distribution of OpenVINO™ toolkit capabilities. - -Congratulations. You have completed all the required installation, configuration, and build steps to work with your trained models using CPU. - -If you want to use Intel® Processor graphics (GPU), Intel® Neural Compute Stick 2 or Intel® Vision Accelerator Design with Intel® Movidius™ (VPU), or add CMake* and Python* to your Windows* environment variables, read through the next section for additional steps. - -If you want to continue and run the Image Classification Sample Application on one of the supported hardware device, see the [Run the Image Classification Sample Application](#run-the-image-classification-sample-application) section. - -## Install and Configure Your Compatible FPGA Hardware - -Install your compatible hardware from the list of supported components below. - -> **NOTE**: Once you've completed your hardware installation, you'll return to this guide to finish installation and configuration of the Intel® Distribution of OpenVINO™ toolkit. 
- -Links to install and configure compatible hardware -- [The Intel® Vision Accelerator Design with an Intel® Arria 10 FPGA SG2 (Mustang-F100-A10)](VisionAcceleratorFPGA_Configure_Windows.md) - -Congratulations, you have finished the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. To learn more about how the Intel® Distribution of OpenVINO™ toolkit works, the Hello World tutorial and other resources are provided below. - -## Optional Steps - -Use the optional steps below if you want to: -* Infer models on Intel® Processor Graphics -* Infer models on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -* Add CMake* or Python* to your Windows* environment variables. - -### Optional: Additional Installation Steps for Intel® Processor Graphics (GPU) - -> **NOTE**: These steps are required only if you want to use a GPU. - -If your applications offload computation to Intel® Integrated Graphics, you must have the Intel Graphics Driver for Windows version 15.65 or higher. To see if you have this driver installed: - -1. Type **device manager** in your **Search Windows** box. The **Device Manager** opens. - -2. Click the drop-down arrow to view the **Display adapters**. You see the adapter that is installed in your computer: - - ![](../img/DeviceManager.PNG) - -3. Right-click the adapter name and select **Properties**. - -4. Click the **Driver** tab to see the driver version. Make sure the version number is 15.65 or higher. - - ![](../img/DeviceDriverVersion.PNG) - -5. If your device driver version is lower than 15.65, [download and install a higher version](http://downloadcenter.intel.com/product/80939/Graphics-Drivers). - -You are done updating your device driver and are ready to use your GPU. - - -### Optional: Additional Installation Steps for the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs - -> **NOTE**: These steps are required only if you want to use Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. - -To perform inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the following additional installation steps are required: - - 1. If your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires SMBUS connection to PCIe slot (Raw video data card with HW version Fab-B and before), install the SMBUS driver: - 1. Go to the `\deployment_tools\inference-engine\external\hddl\SMBusDriver` directory, where `` is the directory in which the Intel Distribution of OpenVINO toolkit is installed. - 2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop up menu. - - 2. Download and install Visual C++ Redistributable for Visual Studio 2017 - -You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. - -See also: - -* For advanced configuration steps for your IEI Mustang-V100-MX8 accelerator, see [Intel® Movidius™ VPUs Setup Guide for Use with Intel® Distribution of OpenVINO™ toolkit](movidius-setup-guide.md). - -* After you've configurated your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, see [Intel® Movidius™ VPUs Programming Guide for Use with Intel® Distribution of OpenVINO™ toolkit](movidius-programming-guide.md) to learn how to distribute a model across all 8 VPUs to maximize performance. - -After configuration is done, you are ready to run the verification scripts with the HDDL Plugin for your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs. - -1. Open a command prompt window. - -2. 
Go to the Inference Engine demo directory: - ```sh - cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\ - ``` -3. Run the Image Classification verification script. If you have access to the Internet through the proxy server only, please make sure that it is configured in your environment. - ```sh - demo_squeezenet_download_convert_run.bat -d HDDL - ``` -4. Run the Inference Pipeline verification script: - ```sh - demo_security_barrier_camera.bat -d HDDL - ``` - -### Optional: Update Your Windows Environment Variables - -> **NOTE**: These steps are only required under special circumstances, such as if you forgot to check the box during the CMake\* or Python\* installation to add the application to your Windows `PATH` environment variable. - -Use these steps to update your Windows `PATH` if a command you execute returns an error message stating that an application cannot be found. This might happen if you do not add CMake or Python to your `PATH` environment variable during the installation. - -1. In your **Search Windows** box, type **Edit the system environment variables** and press **Enter**. A window similar to the following displays: - ![](../img/System_Properties.PNG) - -2. At the bottom of the screen, click **Environment Variables**. - -3. Under **System variables**, click **Path** and then **Edit**: - ![](../img/Environment_Variables-select_Path.PNG) - -4. In the opened window, click **Browse**. A browse window opens: - ![](../img/Add_Environment_Variable.PNG) - -5. If you need to add CMake to the `PATH`, browse to the directory in which you installed CMake. The default directory is `C:\Program Files\CMake`. - -6. If you need to add Python to the `PATH`, browse to the directory in which you installed Python. The default directory is `C:\Users\\AppData\Local\Programs\Python\Python36\Python`. - -7. Click **OK** repeatedly to close each screen. - -Your `PATH` environment variable is updated. - -## Hello World Face Detection Tutorial - -Refer to the [OpenVINO™ with FPGA Hello World Face Detection Exercise](https://github.com/intel-iot-devkit/openvino-with-fpga-hello-world-face-detection). - -**Additional Resources** - -- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit). -- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org). -- [Inference Engine FPGA plugin documentation](../IE_DG/supported_plugins/FPGA.md). -- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). -- For more information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). -- To learn about pre-trained models for OpenVINO™ toolkit, see the [Pre-Trained Models Overview](@ref omz_models_intel_index). -- For information on Inference Engine Tutorials, see the [Inference Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic). -- For IoT Libraries & Code Samples see the [Intel® IoT Developer Kit](https://github.com/intel-iot-devkit). 
- -To learn more about converting models, go to: - -- [Convert Your Caffe* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md) -- [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) -- [Convert Your MXNet* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md) -- [Convert Your ONNX* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md) \ No newline at end of file +For installation instructions for the last release of Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_windows_fpga.html). \ No newline at end of file diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index e7866b5b9ac898..ea560c8adcc17a 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -2,8 +2,6 @@ > **NOTES**: > - This guide applies to Microsoft Windows\* 10 64-bit. For Linux* OS information and instructions, see the [Installation Guide for Linux](installing-openvino-linux.md). -> - For the Intel® Distribution of OpenVINO™ toolkit for Windows* 10 with FPGA -support, see [Installation Guide for Windows* with FPGA support](installing-openvino-windows-fpga.md). > - [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019). ## Introduction From 36ab5d6454249c7f699634caeb85575e00b742cc Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 2 Oct 2020 13:32:25 +0300 Subject: [PATCH 06/41] Updated according to CVS-38225 --- docs/install_guides/installing-openvino-yocto.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/install_guides/installing-openvino-yocto.md b/docs/install_guides/installing-openvino-yocto.md index d992fe5c1e41f5..c5340095d90a3a 100644 --- a/docs/install_guides/installing-openvino-yocto.md +++ b/docs/install_guides/installing-openvino-yocto.md @@ -1,7 +1,7 @@ # Create a Yocto* Image with OpenVINO™ toolkit {#openvino_docs_install_guides_installing_openvino_yocto} This document provides instructions for creating a Yocto* image with OpenVINO™ toolkit. -Instructions were validated and tested for [Yocto OpenVINO 2020.3 release](http://git.yoctoproject.org/cgit/cgit.cgi/meta-intel). +Instructions were validated and tested for [Yocto OpenVINO 2020.4 release](http://git.yoctoproject.org/cgit/cgit.cgi/meta-intel). ## System Requirements Use the [Yocto Project* official documentation](https://www.yoctoproject.org/docs/latest/mega-manual/mega-manual.html#brief-compatible-distro) to set up and configure your host machine to be compatible with BitBake*. @@ -60,9 +60,6 @@ CORE_IMAGE_EXTRA_INSTALL_append = " openvino-inference-engine-samples" # Include inference engine python API package in the target image. CORE_IMAGE_EXTRA_INSTALL_append = " openvino-inference-engine-python3" -# This adds inference engine unit tests in the target image. 
-CORE_IMAGE_EXTRA_INSTALL_append = " openvino-inference-engine-ptest" - # Enable MYRIAD plugin CORE_IMAGE_EXTRA_INSTALL_append = " openvino-inference-engine-vpu-firmware" @@ -90,7 +87,6 @@ Verify that it returns the list of packages below: openvino-inference-engine openvino-inference-engine-dbg openvino-inference-engine-dev -openvino-inference-engine-ptest openvino-inference-engine-python3 openvino-inference-engine-samples openvino-inference-engine-src From a4d89584dd876e6a38c80e14463ec784c3747167 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Fri, 2 Oct 2020 13:43:34 +0300 Subject: [PATCH 07/41] some changes --- docs/doxygen/openvino_docs.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 1ffe0005cf1d7a..157d655f0dec7d 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -4,8 +4,8 @@ - - + + From dc0895914d0a5c714200cb95a9ea4922b961f11c Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Sat, 3 Oct 2020 18:23:58 +0300 Subject: [PATCH 08/41] Made changes to benchmarks according to review comments --- docs/benchmarks/performance_benchmarks.md | 3 ++- docs/benchmarks/performance_int8_vs_fp32.md | 2 +- docs/doxygen/openvino_docs.xml | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/benchmarks/performance_benchmarks.md b/docs/benchmarks/performance_benchmarks.md index 5b3f91c1200b5d..b3aa61b7f1155d 100644 --- a/docs/benchmarks/performance_benchmarks.md +++ b/docs/benchmarks/performance_benchmarks.md @@ -129,7 +129,8 @@ Testing by Intel done on: see test date for each HW platform below. | | Intel® Core™ i5-8500 | Intel® Core™ i7-8700T | Intel® Core™ i9-10920X | 11th Gen Intel® Core™ i5-1145G7E | | -------------------- | ---------------------------------- | ----------------------------------- |--------------------------------------|-----------------------------------| -| Motherboard | ASUS* PRIME Z370-A | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | Intel Corporation /
TigerLake U DDR4 SODIMM RVP | +| Motherboard | ASUS* PRIME Z370-A | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | Intel Corporation /
internal/ +Reference Validation Platform | | CPU | Intel® Core™ i5-8500 CPU @ 3.00GHz | Intel® Core™ i7-8700T CPU @ 2.40GHz | Intel® Core™ i9-10920X CPU @ 3.50GHz | 11th Gen Intel® Core™ i5-1145G7E @ 2.60GHz | | Hyper Threading | OFF | ON | ON | ON | | Turbo Setting | ON | ON | ON | ON | diff --git a/docs/benchmarks/performance_int8_vs_fp32.md b/docs/benchmarks/performance_int8_vs_fp32.md index 02a48d08146c19..6043b68f2ce63d 100644 --- a/docs/benchmarks/performance_int8_vs_fp32.md +++ b/docs/benchmarks/performance_int8_vs_fp32.md @@ -10,7 +10,7 @@ The table below illustrates the speed-up factor for the performance gain by swit Intel® Xeon®
Gold
5218T Intel® Xeon®
Platinum
8270 Intel® Core™
i7-1065G7 - Intel® Core™
i7-1145G7E + Intel® Core™
i5-1145G7E OpenVINO
benchmark
model name diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 3e930e6443609d..70e57db50e6b7a 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -49,6 +49,7 @@ + From 312a33c66a7e4a0c2bb90b36a9433953dd17ec14 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Mon, 5 Oct 2020 19:22:32 +0300 Subject: [PATCH 09/41] Added logo info to the Legal_Information, updated Ubuntu, CentOS supported versions --- docs/IE_DG/Samples_Overview.md | 8 +++++--- docs/IE_DG/supported_plugins/CPU.md | 4 ++-- .../CONTRIBUTING.md | 2 +- docs/Legal_Information.md | 7 +++++++ docs/install_guides/deployment-manager-tool.md | 2 +- docs/install_guides/installing-openvino-conda.md | 4 ++-- docs/install_guides/installing-openvino-linux.md | 3 +-- 7 files changed, 19 insertions(+), 11 deletions(-) diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 6f4411d47babd7..08b509628a6687 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -49,9 +49,11 @@ You can download the [pre-trained models](@ref omz_models_intel_index) using the The officially supported Linux* build environment is the following: -* Ubuntu* 16.04 LTS 64-bit or CentOS* 7.4 64-bit -* GCC* 5.4.0 (for Ubuntu* 16.04) or GCC* 4.8.5 (for CentOS* 7.4) -* CMake* version 2.8.12 or higher +* Ubuntu* 18.04 LTS 64-bit or CentOS* 7.6 64-bit +* GCC* 7.5.0 (for Ubuntu* 18.04) or GCC* 4.8.5 (for CentOS* 7.6) +* CMake* version 3.10 or higher + +> **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md). To build the C or C++ sample applications for Linux, go to the `/inference_engine/samples/c` or `/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script: ```sh diff --git a/docs/IE_DG/supported_plugins/CPU.md b/docs/IE_DG/supported_plugins/CPU.md index df9693a8f37c61..3d41c6030f1511 100644 --- a/docs/IE_DG/supported_plugins/CPU.md +++ b/docs/IE_DG/supported_plugins/CPU.md @@ -14,8 +14,8 @@ OpenVINO™ toolkit is officially supported and validated on the following platf | Host | OS (64-bit) | | :--- | :--- | -| Development | Ubuntu* 16.04/CentOS* 7.4/MS Windows* 10 | -| Target | Ubuntu* 16.04/CentOS* 7.4/MS Windows* 10 | +| Development | Ubuntu* 18.04, CentOS* 7.5, MS Windows* 10 | +| Target | Ubuntu* 18.04, CentOS* 7.5, MS Windows* 10 | The CPU Plugin supports inference on Intel® Xeon® with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel® Core™ Processors with Intel® AVX2, Intel Atom® Processors with Intel® Streaming SIMD Extensions (Intel® SSE). diff --git a/docs/Inference_Engine_Development_Procedure/CONTRIBUTING.md b/docs/Inference_Engine_Development_Procedure/CONTRIBUTING.md index b121254303ebf9..1df9b7a97e5a96 100644 --- a/docs/Inference_Engine_Development_Procedure/CONTRIBUTING.md +++ b/docs/Inference_Engine_Development_Procedure/CONTRIBUTING.md @@ -1,7 +1,7 @@ # Inference Engine development configuration document {#openvino_docs_Inference_Engine_Development_Procedure_CONTRIBUTING} To create MakeFiles use following process or run build-after-clone.sh script located in the root -folder if you use Ubuntu 16.04. +folder if you use Ubuntu 18.04. To create Visual Studio project run create_vs_proj_x64.cmd from scripts folder. 
## Setting up the environment for development diff --git a/docs/Legal_Information.md b/docs/Legal_Information.md index 4bcb046a8909d9..00c6cd968357e6 100644 --- a/docs/Legal_Information.md +++ b/docs/Legal_Information.md @@ -15,3 +15,10 @@ Your costs and results may vary. Intel technologies may require enabled hardware, software or service activation. © Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. \*Other names and brands may be claimed as the property of others. + +## OpenVINO™ Logo +To build equity around the project, the OpenVINO logo was created for both Intel and community usage. The logo may only be used to represent the OpenVINO toolkit and offerings built using the OpenVINO toolkit. + +## Logo Usage Guidelines +The OpenVINO logo must be used in connection with truthful, non-misleading references to the OpenVINO toolkit, and for no other purpose. +Modification of the logo or use of any separate element(s) of the logo alone is not allowed. diff --git a/docs/install_guides/deployment-manager-tool.md b/docs/install_guides/deployment-manager-tool.md index 64fbbdee5e0e19..09839a486850be 100644 --- a/docs/install_guides/deployment-manager-tool.md +++ b/docs/install_guides/deployment-manager-tool.md @@ -18,7 +18,7 @@ The Deployment Manager is a Python\* command-line tool that is delivered within * Steps for the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -> **IMPORTANT**: The operating system on the target host must be the same as the development system on which you are creating the package. For example, if the target system is Ubuntu 16.04, the deployment package must be created from the OpenVINO™ toolkit installed on Ubuntu 16.04. +> **IMPORTANT**: The operating system on the target host must be the same as the development system on which you are creating the package. For example, if the target system is Ubuntu 18.04, the deployment package must be created from the OpenVINO™ toolkit installed on Ubuntu 18.04. ## Create Deployment Package Using Deployment Manager diff --git a/docs/install_guides/installing-openvino-conda.md b/docs/install_guides/installing-openvino-conda.md index e25758c9b35863..c491c862a682dc 100644 --- a/docs/install_guides/installing-openvino-conda.md +++ b/docs/install_guides/installing-openvino-conda.md @@ -10,7 +10,7 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t **Operating Systems** - Ubuntu* 18.04 long-term support (LTS), 64-bit -- CentOS* 7.4, 64-bit +- CentOS* 7.6, 64-bit - macOS* 10.14.x versions. 
- Windows 10*, 64-bit Pro, Enterprise or Education (1607 Anniversary Update, Build 14393 or higher) editions - Windows Server* 2016 or higher @@ -30,7 +30,7 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t ```sh conda install openvino-ie4py-ubuntu18 -c intel ``` - - CentOS* 7.4 + - CentOS* 7.6 ```sh conda install openvino-ie4py-centos7 -c intel ``` diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index c4088aac99fd4e..0e61957471c33c 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -23,7 +23,6 @@ The Intel® Distribution of OpenVINO™ toolkit for Linux\*: |-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) | This tool imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. 
Popular frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. | | [Inference Engine](../IE_DG/inference_engine_intro.md) | This is the engine that runs the deep learning model. It includes a set of libraries for an easy inference integration into your applications. | -| Drivers and runtimes for OpenCL™ version 2.1 | Enables OpenCL on the GPU/CPU for Intel® processors | | Intel® Media SDK | Offers access to hardware accelerated video codecs and frame processing | | [OpenCV](https://docs.opencv.org/master/) | OpenCV\* community version compiled for Intel® hardware | | [Inference Engine Code Samples](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to utilize specific OpenVINO capabilities in an application and how to perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more. | @@ -54,7 +53,7 @@ The Intel® Distribution of OpenVINO™ toolkit for Linux\*: **Operating Systems** - Ubuntu 18.04.x long-term support (LTS), 64-bit -- CentOS 7.4, 64-bit (for target only) +- CentOS 7.6, 64-bit (for target only) - Yocto Project v3.0, 64-bit (for target only and requires modifications) ## Overview From bc9ef3beb64e618e81a684cbe34b8553e541b0c2 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Mon, 5 Oct 2020 23:20:36 +0300 Subject: [PATCH 10/41] =?UTF-8?q?Updated=20supported=20Intel=C2=AE=20Core?= =?UTF-8?q?=E2=84=A2=20processors=20list?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/install_guides/installing-openvino-linux.md | 2 +- docs/install_guides/installing-openvino-macos.md | 2 +- docs/install_guides/installing-openvino-raspbian.md | 2 +- docs/install_guides/installing-openvino-windows.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index 0e61957471c33c..a1c2f77043a627 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -34,7 +34,7 @@ The Intel® Distribution of OpenVINO™ toolkit for Linux\*: **Hardware** -* 6th to 10th generation Intel® Core™ processors and Intel® Xeon® processors +* 6th to 11th generation Intel® Core™ processors and Intel® Xeon® processors * Intel® Xeon® processor E family (formerly code named Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) * 3rd generation Intel® Xeon® Scalable processor (formerly code named Cooper Lake) * Intel® Xeon® Scalable processor (formerly Skylake and Cascade Lake) diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index bc4b07f8d210c2..31dd789519ce62 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -40,7 +40,7 @@ The development and target platforms have the same requirements, but you can sel > **NOTE**: The current version of the Intel® Distribution of OpenVINO™ toolkit for macOS* supports inference on Intel CPUs and Intel® Neural Compute Sticks 2 only. 
-* 6th to 10th generation Intel® Core™ processors and Intel® Xeon® processors +* 6th to 11th generation Intel® Core™ processors and Intel® Xeon® processors * Intel® Xeon® processor E family (formerly code named Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) * 3rd generation Intel® Xeon® Scalable processor (formerly code named Cooper Lake) * Intel® Xeon® Scalable processor (formerly Skylake and Cascade Lake) diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index a9398d2d5c6312..28d620d8a1c5f2 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -144,7 +144,7 @@ mkdir build && cd build 2. Build the Object Detection Sample: ```sh -cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino/deployment_tools/inference_engine/samples +cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino/deployment_tools/inference_engine/samples*/cpp* ``` ```sh make -j2 object_detection_sample_ssd diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 8f050474ae5fba..ecc32fd81aea4f 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -67,7 +67,7 @@ The following components are installed by default: **Hardware** -* 6th to 10th generation Intel® Core™ processors and Intel® Xeon® processors +* 6th to 11th generation Intel® Core™ processors and Intel® Xeon® processors * Intel® Xeon® processor E family (formerly code named Sandy Bridge, Ivy Bridge, Haswell, and Broadwell) * 3rd generation Intel® Xeon® Scalable processor (formerly code named Cooper Lake) * Intel® Xeon® Scalable processor (formerly Skylake and Cascade Lake) From eb7e5bcc1b8a6332844c871d6043135887b1f07c Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Mon, 5 Oct 2020 23:27:49 +0300 Subject: [PATCH 11/41] Fixed table formatting --- docs/benchmarks/performance_benchmarks.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/benchmarks/performance_benchmarks.md b/docs/benchmarks/performance_benchmarks.md index b3aa61b7f1155d..9f172d82d99ae8 100644 --- a/docs/benchmarks/performance_benchmarks.md +++ b/docs/benchmarks/performance_benchmarks.md @@ -129,8 +129,7 @@ Testing by Intel done on: see test date for each HW platform below. | | Intel® Core™ i5-8500 | Intel® Core™ i7-8700T | Intel® Core™ i9-10920X | 11th Gen Intel® Core™ i5-1145G7E | | -------------------- | ---------------------------------- | ----------------------------------- |--------------------------------------|-----------------------------------| -| Motherboard | ASUS* PRIME Z370-A | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | Intel Corporation /
internal/ -Reference Validation Platform | +| Motherboard | ASUS* PRIME Z370-A | GIGABYTE* Z370M DS3H-CF | ASUS* PRIME X299-A II | Intel Corporation
internal/Reference Validation Platform | | CPU | Intel® Core™ i5-8500 CPU @ 3.00GHz | Intel® Core™ i7-8700T CPU @ 2.40GHz | Intel® Core™ i9-10920X CPU @ 3.50GHz | 11th Gen Intel® Core™ i5-1145G7E @ 2.60GHz | | Hyper Threading | OFF | ON | ON | ON | | Turbo Setting | ON | ON | ON | ON | From 182f53c5295bdc9d2ce4899c157149246df1dca9 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 14:09:45 +0300 Subject: [PATCH 12/41] update api layouts --- docs/doxygen/ie_c_api.xml | 10 ++++------ docs/doxygen/ie_py_api.xml | 10 ++++------ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/docs/doxygen/ie_c_api.xml b/docs/doxygen/ie_c_api.xml index dce0fe09693a62..f14d39e19399ad 100644 --- a/docs/doxygen/ie_c_api.xml +++ b/docs/doxygen/ie_c_api.xml @@ -3,12 +3,10 @@ - - - - - - + + + + diff --git a/docs/doxygen/ie_py_api.xml b/docs/doxygen/ie_py_api.xml index 786ea13219be3e..771566e211b99b 100644 --- a/docs/doxygen/ie_py_api.xml +++ b/docs/doxygen/ie_py_api.xml @@ -3,12 +3,10 @@ - - - - - - + + + + From cd7456204d50d436752ea787c2111ecbf8301858 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 14:25:26 +0300 Subject: [PATCH 13/41] Added new index page with overview --- docs/img/OV-diagram-full.svg | 501 ++++++++++++++++++++++++++++++++++ docs/img/OV-diagram-step2.svg | 182 ++++++++++++ docs/img/OV-diagram-step3.svg | 196 +++++++++++++ docs/index.md | 125 ++++++--- 4 files changed, 960 insertions(+), 44 deletions(-) create mode 100644 docs/img/OV-diagram-full.svg create mode 100644 docs/img/OV-diagram-step2.svg create mode 100644 docs/img/OV-diagram-step3.svg diff --git a/docs/img/OV-diagram-full.svg b/docs/img/OV-diagram-full.svg new file mode 100644 index 00000000000000..3f0d9127f15daa --- /dev/null +++ b/docs/img/OV-diagram-full.svg @@ -0,0 +1,501 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Step 0. Planning and Setup + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Determine environments & configuration required + Determine model type & framework + + Step 1. Select a Model + + + + + + + + + + + + + + + + + + Find or Train a Model + + + + + Is themodel accurateenough? + + + + + + + + + + + + + + + + + + + + + + Train orRe-train model + + Step 1a. Train + + + + + + + + + + + + + + + + + Step 2a. Modify the Model Post-Training + Step 2. Modify + + + + + Run Model Optimizer + + + + + Did the model convert? + + + + + + + + + + + + + + + + + Fix errors and/or create custom layers + + + + + Did that work? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Escalate to Intel or try alternative models + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Step 3a. Advance Model Tuning + Step 3. Tune + + + + + Run Inference Engine on model + + + + + Was it fast enough with acceptable accuracy? + + + + + + + + + + + + + + + + + Try advanced tuning of the IR model + + + + + Did that work? + + + + + + + + + + + + + + + + + + + + + + + Try hardware-based interventions + + + + + Try training extensions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Step 4. 
Deploy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Integrate model to pipeline or application + Package for deployment + Deploy app and model + Fast Path + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/img/OV-diagram-step2.svg b/docs/img/OV-diagram-step2.svg new file mode 100644 index 00000000000000..0789cfd9c5e4a5 --- /dev/null +++ b/docs/img/OV-diagram-step2.svg @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Step 2a. Modify the Model Post-Training + Step 2. Modify + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Run Model Optimizer + Did themodel convert? + Fix errors and/ or create custom layers + Did that work? + Escalate to Intel or try alternative models + + + + + + + + + + + + + + + + + Go to next step + + + + + + + + + + + + + + + + + Go to next step + + + + + + + + + + + + + + diff --git a/docs/img/OV-diagram-step3.svg b/docs/img/OV-diagram-step3.svg new file mode 100644 index 00000000000000..7c906ce27d170e --- /dev/null +++ b/docs/img/OV-diagram-step3.svg @@ -0,0 +1,196 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Step 3a. Advance Model Tuning + Step 3. Tune + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Run Inference Engine on model + Was it fast enough with acceptable accuracy? + Try advanced tuning of theIR model + Did that work? + Tryhardware-basedinterventions + Try trainingextensions + Go to next step + Go to next step + + diff --git a/docs/index.md b/docs/index.md index 9039d7fd98b891..260b50f18b7630 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,8 +1,8 @@ -# OpenVINO™ Toolkit Documentation {#index} +# OpenVINO™ Toolkit Overview {#index} -## Introduction to OpenVINO™ Toolkit +## Introduction -OpenVINO™ toolkit quickly deploys applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNNs), the toolkit extends computer vision (CV) workloads across Intel® hardware, maximizing performance. The OpenVINO™ toolkit includes the Deep Learning Deployment Toolkit (DLDT). +OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance, AI and deep learning inference deployed from edge to cloud. 
 OpenVINO™ toolkit:
 
@@ -11,26 +11,84 @@ OpenVINO™ toolkit:
 - Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
 - Includes optimized calls for computer vision standards, including OpenCV\* and OpenCL™
 
-## Toolkit Components
-
-OpenVINO™ toolkit includes the following components:
-
-- Deep Learning Deployment Toolkit (DLDT)
-    - [Deep Learning Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) - A cross-platform command-line tool for importing models and
-    preparing them for optimal execution with the Inference Engine. The Model Optimizer imports, converts, and optimizes models, which were trained in popular frameworks, such as Caffe*,
-    TensorFlow*, MXNet*, Kaldi*, and ONNX*.
-    - [Deep Learning Inference Engine](IE_DG/inference_engine_intro.md) - A unified API to allow high performance inference on many hardware types
-    including the following:
-        - Intel® CPU
-        - Intel® Integrated Graphics
-        - Intel® Neural Compute Stick 2
-        - Intel® Vision Accelerator Design with Intel® Movidius™ vision processing unit (VPU)
-    - [Samples](IE_DG/Samples_Overview.md) - A set of simple console applications demonstrating how to use the Inference Engine in your applications
-    - [Tools](IE_DG/Tools_Overview.md) - A set of simple console tools to work with your models
+## OpenVINO™ Toolkit Workflow
+
+The following diagram illustrates the typical OpenVINO™ workflow (click to see the full-size image):
+![](img/OV-diagram-full.svg)
+
+### Model Preparation, Conversion and Optimization
+
+You can use your framework of choice to prepare and train a Deep Learning model or just download a pretrained model from the Open Model Zoo. The Open Model Zoo includes Deep Learning solutions to a variety of vision problems, including object recognition, face recognition, pose estimation, text detection, and action recognition, at a range of measured complexities.
+Several of these pretrained models are also used in the [code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos_README). To download models from the Open Model Zoo, use the [Model Downloader](@ref omz_tools_downloader_README) tool.
+
+One of the core components of the OpenVINO™ toolkit is the [Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md), a cross-platform command-line
+tool that converts a trained neural network from its source framework to an open-source, nGraph-compatible [Intermediate Representation (IR)](MO_DG/IR_and_opsets.md) for use in inference operations. The Model Optimizer imports models trained in popular frameworks such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX* and performs a few optimizations to remove excess layers and group operations when possible into simpler, faster graphs. A minimal example invocation is shown below.
+![](img/OV-diagram-step2.svg)
+
+If your neural network model contains layers that are not in the list of known layers for supported frameworks, you can adjust the conversion and optimization process through the use of [Custom Layers](HOWTO/Custom_Layers_Guide.md).
+
+Run the [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README) either against source topologies or against the output representation to evaluate the accuracy of inference. The Accuracy Checker is also part of the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction), an integrated web-based performance analysis studio.
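+
+As a purely illustrative sketch (the install location, model file, and output directory below are placeholders, not files referenced elsewhere in this guide), a typical Model Optimizer conversion of an ONNX model can be run from the command line:
+
+```sh
+cd <INSTALL_DIR>/deployment_tools/model_optimizer
+python3 mo.py --input_model <path_to_model>/model.onnx --output_dir <ir_output_dir>
+```
+
+The resulting `.xml` and `.bin` IR files in `<ir_output_dir>` are what the Inference Engine loads at the inference stage.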
+ +Useful documents for model optimization: +* [Model Optimizer Developer Guide](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) +* [Intermediate Representation and Opsets](MO_DG/IR_and_opsets.md) +* [Custom Layers Guide](HOWTO/Custom_Layers_Guide.md) +* [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README) +* [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) +* [Model Downloader](@ref omz_tools_downloader_README) utility +* [Pretrained Models (Open Model Zoo)](@ref omz_models_public_index) + +### Running and Tuning Inference +The other core component of OpenVINO™ is the [Inference Engine](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md), which manages the loading and compiling of the optimized neural network model, runs inference operations on input data, and outputs the results. Inference Engine can execute synchronously or asynchronously, and its plugin architecture manages the appropriate compilations for execution on multiple Intel® devices, including both workhorse CPUs and specialized graphics and video processing platforms (see below, Packaging and Deployment). + +You can use OpenVINO™ Tuning Utilities with the Inference Engine to trial and test inference on your model. The Benchmark utility uses an input model to run iterative tests for throughput or latency measures, and the Cross Check utility compares performance of differently configured inferences. The [Post-Training Optimization Tool](@ref pot_README) integrates a suite of quantization- and calibration-based tools to further streamline performance. + +For a full browser-based studio integrating these other key tuning utilities, try the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction). +![](img/OV-diagram-step3.svg) + +OpenVINO™ toolkit includes a set of [inference code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos_README) showing how inference is run and output processed for use in retail environments, classrooms, smart camera applications, and other solutions. + +OpenVINO also makes use of open-Source and Intel™ tools for traditional graphics processing and performance management. Intel® Media SDK supports accelerated rich-media processing, including transcoding. OpenVINO™ optimizes calls to the rich OpenCV and OpenVX libraries for processing computer vision workloads. And the new DL Streamer integration further accelerates video pipelining and performance. + +Useful documents for inference tuning: +* [Inference Engine Developer Guide](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) +* [Inference Engine API References](./api_references.html) +* [Inference Code Samples](IE_DG/Samples_Overview.md) +* [Application Demos](@ref omz_demos_README) +* [Post-Training Optimization Tool Guide](@ref pot_README) +* [Deep Learning Workbench Guide](@ref workbench_docs_Workbench_DG_Introduction) +* [Intel Media SDK](https://github.com/Intel-Media-SDK/MediaSDK) +* [DL Streamer Samples](IE_DG/Tools_Overview.md) +* [OpenCV](https://docs.opencv.org/master/) +* [OpenVX](https://software.intel.com/en-us/openvino-ovx-guide) + +### Packaging and Deployment +The Intel Distribution of OpenVINO™ toolkit outputs optimized inference runtimes for the following devices: +* Intel® CPUs +* Intel® Processor Graphics +* Intel® Neural Compute Stick 2 +* Intel® Vision Accelerator Design with Intel® Movidius™ VPUs + +The Inference Engine's plug-in architecture can be extended to meet other specialized needs. 
[Deployment Manager](install_guides/deployment-manager-tool.md) is a Python* command-line tool that assembles the tuned model, IR files, your application, and required dependencies into a runtime package for your target device. It outputs packages for CPU, GPU, and VPU on Linux* and Windows*, and Neural Compute Stick-optimized packages with Linux.
+
+Useful documents for packaging and deployment:
+* [Inference Engine Integration Workflow](IE_DG/Integrate_with_customer_application_new_API.md)
+* [Inference Engine API References](./api_references.html)
+* [Inference Engine Plug-in Developer Guide](./ie_plugin_api/index.html)
+* [Deployment Manager Guide](install_guides/deployment-manager-tool.md)
+
+
+## OpenVINO™ Toolkit Components
+
+Intel® Distribution of OpenVINO™ toolkit includes the following components:
+
+- [Deep Learning Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) - A cross-platform command-line tool for importing models and preparing them for optimal execution with the Inference Engine. The Model Optimizer imports, converts, and optimizes models, which were trained in popular frameworks, such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX*.
+- [Deep Learning Inference Engine](IE_DG/inference_engine_intro.md) - A unified API to allow high performance inference on many hardware types including Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, Intel® Vision Accelerator Design with Intel® Movidius™ vision processing unit (VPU)
+- [Inference Engine Samples](IE_DG/Samples_Overview.md) - A set of simple console applications demonstrating how to use the Inference Engine in your applications
+- [Tools](IE_DG/Tools_Overview.md) - A set of simple console tools to work with your models
 - [Open Model Zoo](@ref omz_models_intel_index)
     - [Demos](@ref omz_demos_README) - Console applications that demonstrate how you can use the Inference Engine in your applications to solve specific use cases
     - [Tools](IE_DG/Tools_Overview.md) - Additional tools to download models and check accuracy
-    - [Documentation for Pretrained Models](@ref omz_models_intel_index) - Documentation for pretrained models is available in the [Open Model Zoo repository](https://github.com/opencv/open_model_zoo)
+    - [Documentation for Pretrained Models](@ref omz_models_intel_index) - Documentation for pretrained models that are available in the [Open Model Zoo repository](https://github.com/opencv/open_model_zoo)
 - [Post-Training Optimization tool](@ref pot_README) - A tool to calibrate a model and then execute it in the INT8 precision
 - [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) - A web-based graphical environment that allows you to easily use various sophisticated OpenVINO™ toolkit components
 - Deep Learning Streamer (DL Streamer) – Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. DL Streamer can be installed by the Intel® Distribution of OpenVINO™ toolkit installer. Its open source version is available on [GitHub](https://github.com/opencv/gst-video-analytics).
For the DL Streamer documentation, see: @@ -39,27 +97,6 @@ OpenVINO™ toolkit includes the following components: - [Elements](https://github.com/opencv/gst-video-analytics/wiki/Elements) - [Tutorial](https://github.com/opencv/gst-video-analytics/wiki/DL%20Streamer%20Tutorial) - [OpenCV](https://docs.opencv.org/master/) - OpenCV* community version compiled for Intel® hardware -- Drivers and runtimes for OpenCL™ version 2.1 -- [Intel® Media SDK](https://software.intel.com/en-us/media-sdk) - -## Documentation Set Contents - -OpenVINO™ toolkit documentation set includes the following documents: - -- [Install the Intel® Distribution of OpenVINO™ Toolkit for Linux*](install_guides/installing-openvino-linux.md) -- [Install the Intel® Distribution of OpenVINO™ Toolkit for Windows*](install_guides/installing-openvino-windows.md) -- [Install the Intel® Distribution of OpenVINO™ Toolkit for macOS*](install_guides/installing-openvino-macos.md) -- [Install the Intel® Distribution of OpenVINO™ Toolkit for Raspbian*](install_guides/installing-openvino-raspbian.md) -- [Install OpenVINO™ Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Install_Workbench) -- [Introduction to Deep Learning Deployment Toolkit](IE_DG/Introduction.md) -- [Model Optimizer Developer Guide](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) -- [Inference Engine Developer Guide](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) -- [Post-Training Optimization Tool](@ref pot_README) -- [Inference Engine Samples](IE_DG/Samples_Overview.md) -- [Demo Applications](@ref omz_demos_README) -- [Tools](IE_DG/Tools_Overview.md) -- [Pretrained Models](@ref omz_models_intel_index) -- [Known Issues](IE_DG/Known_Issues_Limitations.md) -- [Legal Information](@ref omz_demos_README) - -> **Typical Next Step:** [Introduction to Deep Learning Deployment Toolkit](IE_DG/Introduction.md) +- [Intel® Media SDK](https://software.intel.com/en-us/media-sdk) (in Intel® Distribution of OpenVINO™ toolkit for Linux only) + +OpenVINO™ Toolkit opensource version is available on [GitHub](https://github.com/openvinotoolkit/openvino). For building the Inference Engine from the source code, see the [building instructions](https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md). \ No newline at end of file From fa23de8a84ec7fb540d9f7a143481c1ba6a85170 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 16:19:30 +0300 Subject: [PATCH 14/41] Changed CMake and Python versions --- docs/IE_DG/Samples_Overview.md | 2 +- docs/install_guides/installing-openvino-macos.md | 6 +++--- docs/install_guides/installing-openvino-windows.md | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 08b509628a6687..2417ee9ca8ceb8 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -101,7 +101,7 @@ for the debug configuration — in `/intel64/Debug/`. The recommended Windows* build environment is the following: * Microsoft Windows* 10 * Microsoft Visual Studio* 2017, or 2019 -* CMake* version 2.8.12 or higher +* CMake* version 3.10 or higher > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. 
diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 31dd789519ce62..9a7fa188c364be 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -48,14 +48,14 @@ The development and target platforms have the same requirements, but you can sel **Software Requirements** -- CMake 3.9 or higher -- Python 3.5 - 3.7 +- CMake 3.10 or higher +- Python 3.6 - 3.7 - Apple Xcode\* Command Line Tools - (Optional) Apple Xcode\* IDE (not required for OpenVINO, but useful for development) **Operating Systems** -- macOS\* 10.14.4 +- macOS\* 10.15 ## Overview diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index ecc32fd81aea4f..edefefc711208e 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -17,9 +17,9 @@ Your installation is complete when these are all completed: 2. Install the dependencies: - [Microsoft Visual Studio* with C++ **2019 or 2017** with MSBuild](http://visualstudio.microsoft.com/downloads/) - - [CMake **2.8.12 or higher** 64-bit](https://cmake.org/download/) + - [CMake **3.10 or higher** 64-bit](https://cmake.org/download/) > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. - - [Python **3.5** - **3.7** 64-bit](https://www.python.org/downloads/windows/) + - [Python **3.6** - **3.8** 64-bit](https://www.python.org/downloads/windows/) > **IMPORTANT**: As part of this installation, make sure you click the option to add the application to your `PATH` environment variable. 3. Set Environment Variables @@ -89,9 +89,9 @@ The following components are installed by default: **Software** - [Microsoft Visual Studio* with C++ **2019 or 2017** with MSBuild](http://visualstudio.microsoft.com/downloads/) -- [CMake **2.8.12 or higher** 64-bit](https://cmake.org/download/) +- [CMake **3.10 or higher** 64-bit](https://cmake.org/download/) > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14. -- [Python **3.5** - **3.7** 64-bit](https://www.python.org/downloads/windows/) +- [Python **3.6** - **3.8** 64-bit](https://www.python.org/downloads/windows/) ## Installation Steps From 94d5318dd4328e00b32aeb27c5491784e6a20469 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 17:44:01 +0300 Subject: [PATCH 15/41] Fixed links --- docs/install_guides/installing-openvino-linux.md | 2 +- docs/install_guides/installing-openvino-macos.md | 2 +- docs/install_guides/installing-openvino-windows.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index a1c2f77043a627..ee50e397966246 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -27,7 +27,7 @@ The Intel® Distribution of OpenVINO™ toolkit for Linux\*: | [OpenCV](https://docs.opencv.org/master/) | OpenCV\* community version compiled for Intel® hardware | | [Inference Engine Code Samples](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to utilize specific OpenVINO capabilities in an application and how to perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more. 
| | [Demo Applications](@ref omz_demos_README) | A set of simple console applications that provide robust application templates to help you implement specific deep learning scenarios. | -| [Additional Tools](../IE_DG/Tools_Overview.md) | A set of tools to work with your models | +| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other | | [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) | ## System Requirements diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 9a7fa188c364be..15a385ec526bde 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -29,7 +29,7 @@ The following components are installed by default: | [OpenCV\*](https://docs.opencv.org/master/) | OpenCV\* community version compiled for Intel® hardware | | [Sample Applications](../IE_DG/Samples_Overview.md) | A set of simple console applications demonstrating how to use the Inference Engine in your applications. | | [Demos](@ref omz_demos_README) | A set of console applications that demonstrate how you can use the Inference Engine in your applications to solve specific use-cases | -| [Additional Tools](../IE_DG/Tools_Overview.md) | A set of tools to work with your models | +| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other | | [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) | ## Development and Target Platform diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index edefefc711208e..567697d9cb6b87 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -60,7 +60,7 @@ The following components are installed by default: |[OpenCV\*](https://docs.opencv.org/master/) |OpenCV* community version compiled for Intel® hardware | |[Inference Engine Samples](../IE_DG/Samples_Overview.md) |A set of simple console applications demonstrating how to use Intel's Deep Learning Inference Engine in your applications. 
| | [Demos](@ref omz_demos_README) | A set of console applications that demonstrate how you can use the Inference Engine in your applications to solve specific use-cases | -| [Additional Tools](../IE_DG/Tools_Overview.md) | A set of tools to work with your models | +| Additional Tools | A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other | | [Documentation for Pre-Trained Models ](@ref omz_models_intel_index) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/opencv/open_model_zoo) | ### System Requirements From 0039126fdc1f323c32f6908653a6ca38cd656606 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 17:44:37 +0300 Subject: [PATCH 16/41] some layout changes --- docs/doxygen/openvino_docs.xml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 708a3a0c2936d6..0dee267020e94b 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -7,11 +7,17 @@ - - + + + + + + + + - + From fdc785dfa69b9d61d561fd0347ff212ab0c3f1e2 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 18:26:36 +0300 Subject: [PATCH 17/41] some layout changes --- docs/doxygen/ie_docs.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index 1e45e2bff56af4..d5c32a981fce69 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -281,7 +281,7 @@ - + From 59c93e5e5e207fb8a526c634390497f75f197513 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 19:48:56 +0300 Subject: [PATCH 18/41] some layout changes --- docs/doxygen/ie_docs.xml | 5 ++--- docs/doxygen/openvino_docs.xml | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index d5c32a981fce69..53c9764ba409c0 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -56,6 +56,8 @@ + + @@ -296,8 +298,5 @@ - - - diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 0dee267020e94b..58166bb33214af 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -13,7 +13,7 @@ - + @@ -36,6 +36,7 @@ + @@ -55,7 +56,6 @@ - @@ -64,7 +64,6 @@ - From 2962a9d1ef10e7d33d04f0783084c9bbb6a9b8e7 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 21:13:15 +0300 Subject: [PATCH 19/41] COnverted svg images to png --- docs/img/OV-diagram-full.svg | 501 ---------------------------------- docs/img/OV-diagram-step2.png | 3 + docs/img/OV-diagram-step2.svg | 182 ------------ docs/img/OV-diagram-step3.png | 3 + docs/img/OV-diagram-step3.svg | 196 ------------- docs/img/OpenVINO-diagram.png | 3 + docs/index.md | 6 +- 7 files changed, 12 insertions(+), 882 deletions(-) delete mode 100644 docs/img/OV-diagram-full.svg create mode 100644 docs/img/OV-diagram-step2.png delete mode 100644 docs/img/OV-diagram-step2.svg create mode 100644 docs/img/OV-diagram-step3.png delete mode 100644 docs/img/OV-diagram-step3.svg create mode 100644 docs/img/OpenVINO-diagram.png diff --git a/docs/img/OV-diagram-full.svg b/docs/img/OV-diagram-full.svg deleted file mode 100644 index 3f0d9127f15daa..00000000000000 --- a/docs/img/OV-diagram-full.svg +++ /dev/null @@ -1,501 +0,0 @@ - - - - - - - 
[SVG markup of the deleted docs/img/OV-diagram-full.svg (501 lines) omitted; recoverable diagram text: Step 0. Planning and Setup (determine environments & configuration required; determine model type & framework); Step 1. Select a Model / Step 1a. Train (find or train a model; is the model accurate enough?; train or re-train model); Step 2. Modify / Step 2a. Modify the Model Post-Training (run Model Optimizer; did the model convert?; fix errors and/or create custom layers; did that work?; escalate to Intel or try alternative models); Step 3. Tune / Step 3a. Advance Model Tuning (run Inference Engine on model; was it fast enough with acceptable accuracy?; try advanced tuning of the IR model; did that work?; try hardware-based interventions; try training extensions); Step 4. Deploy (integrate model to pipeline or application; package for deployment; deploy app and model; Fast Path)]
diff --git a/docs/img/OV-diagram-step2.png b/docs/img/OV-diagram-step2.png
new file mode 100644
index 00000000000000..a8095441e2427a
--- /dev/null
+++ b/docs/img/OV-diagram-step2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a5e64956ea61461c3bf942cd3802c16b33188e7bef8955384d46dd08a84f4d3
+size 46658
diff --git a/docs/img/OV-diagram-step2.svg b/docs/img/OV-diagram-step2.svg
deleted file mode 100644
index 0789cfd9c5e4a5..00000000000000
--- a/docs/img/OV-diagram-step2.svg
+++ /dev/null
@@ -1,182 +0,0 @@
[SVG markup of the deleted docs/img/OV-diagram-step2.svg (182 lines) omitted; recoverable diagram text: Step 2. Modify / Step 2a. Modify the Model Post-Training; run Model Optimizer; did the model convert?; fix errors and/or create custom layers; did that work?; escalate to Intel or try alternative models; go to next step]
diff --git a/docs/img/OV-diagram-step3.png b/docs/img/OV-diagram-step3.png
new file mode 100644
index 00000000000000..5e4b233b902dff
--- /dev/null
+++ b/docs/img/OV-diagram-step3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f7083e9eff7158ca99990daa1a9ee33c73c09e7a2f3d6c6da316aa9be3ad1ec
+size 50327
diff --git a/docs/img/OV-diagram-step3.svg b/docs/img/OV-diagram-step3.svg
deleted file mode 100644
index 7c906ce27d170e..00000000000000
--- a/docs/img/OV-diagram-step3.svg
+++ /dev/null
@@ -1,196 +0,0 @@
[SVG markup of the deleted docs/img/OV-diagram-step3.svg (196 lines) omitted; recoverable diagram text begins: Step 3a. Advance Model Tuning; Step 3.]
Tune - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Run Inference Engine on model - Was it fast enough with acceptable accuracy? - Try advanced tuning of theIR model - Did that work? - Tryhardware-basedinterventions - Try trainingextensions - Go to next step - Go to next step - - diff --git a/docs/img/OpenVINO-diagram.png b/docs/img/OpenVINO-diagram.png new file mode 100644 index 00000000000000..f069f8cf947598 --- /dev/null +++ b/docs/img/OpenVINO-diagram.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00ec72e982f658698b599dd8cbcbd50996a1982c4223bce93b807fa6b0c0c825 +size 233866 diff --git a/docs/index.md b/docs/index.md index 260b50f18b7630..9703030128fc7a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -14,7 +14,7 @@ OpenVINO™ toolkit: ## OpenVINO™ Toolkit Workflow The following diagram illustrates the typical OpenVINO™ workflow (click to see the full-size image): -![](img/OV-diagram-full.svg) +![](img/OV-diagram-full.png) ### Model Preparation, Conversion and Optimization @@ -23,7 +23,7 @@ Several of these pretrained models are used also in the [code samples](E_DG/Samp One of the core component of the OpenVINO™ toolkit is the [Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) a cross-platform command-line tool that converts a trained neural network from its source framework to an open-source, nGraph-compatible [Intermediate Representation (IR)](MO_DG/IR_and_opsets.md) for use in inference operations. The Model Optimizer imports models trained in popular frameworks such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX* and performs a few optimizations to remove excess layers and group operations when possible into simpler, faster graphs. -![](img/OV-diagram-step2.svg) +![](img/OV-diagram-step2.png) If your neural network model contains layers that are not in the list of known layers for supported frameworks, you can adjust the conversion and optimization process through use of [Custom Layers](HOWTO/Custom_Layers_Guide.md). @@ -44,7 +44,7 @@ The other core component of OpenVINO™ is the [Inference Engine](IE_DG/Deep_Lea You can use OpenVINO™ Tuning Utilities with the Inference Engine to trial and test inference on your model. The Benchmark utility uses an input model to run iterative tests for throughput or latency measures, and the Cross Check utility compares performance of differently configured inferences. The [Post-Training Optimization Tool](@ref pot_README) integrates a suite of quantization- and calibration-based tools to further streamline performance. For a full browser-based studio integrating these other key tuning utilities, try the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction). -![](img/OV-diagram-step3.svg) +![](img/OV-diagram-step3.png) OpenVINO™ toolkit includes a set of [inference code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos_README) showing how inference is run and output processed for use in retail environments, classrooms, smart camera applications, and other solutions. 
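The `.png` diagrams added by this commit are committed as Git LFS pointer files (the `version`/`oid sha256`/`size` stanzas above) rather than raw binaries. If a checkout shows those three-line pointers instead of images, the actual files can be fetched with the Git LFS client; a minimal sketch, assuming `git-lfs` is installed and the remote serves the objects:

```sh
# Enable LFS for this clone, then download the image binaries referenced
# by the pointer files added in this commit.
git lfs install
git lfs pull --include="docs/img/*.png"

# Optional: list the paths tracked by LFS to confirm the new diagrams are among them.
git lfs ls-files | grep "docs/img"
```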
From c2fa6817f78c3021c37c6a8b9e68b16d29e6ae90 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 21:25:50 +0300 Subject: [PATCH 20/41] layouts --- docs/doxygen/doxy_md_filter.py | 4 ++-- docs/doxygen/ie_docs.xml | 4 ++++ docs/doxygen/ngraph_cpp_api.xml | 10 ++++------ docs/doxygen/ngraph_py_api.xml | 10 ++++------ 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/doxygen/doxy_md_filter.py b/docs/doxygen/doxy_md_filter.py index e0c5b502cf4e68..8ac57b48da1442 100644 --- a/docs/doxygen/doxy_md_filter.py +++ b/docs/doxygen/doxy_md_filter.py @@ -72,9 +72,9 @@ def process(docs_folder): md_folder = os.path.dirname(md_file) with open(md_file, 'r', encoding='utf-8') as f: content = f.read() - inline_links = set(re.findall(r'!?\[.*?\]\(([\w\/\-\.]+\.(md|png|jpg|gif))\)', content, flags=re.IGNORECASE)) + inline_links = set(re.findall(r'!?\[.*?\]\(([\w\/\-\.]+\.(md|png|jpg|gif|svg))\)', content, flags=re.IGNORECASE)) github_md_links = set(re.findall(r'(\[(.+?)\]\((https:[\w\.\/-]+?\.md)\))', content, flags=re.IGNORECASE)) - reference_links = set(re.findall(r'\[.+\]\:\s*?([\w\/\-\.]+\.(md|png|jpg|gif))', content, flags=re.IGNORECASE)) + reference_links = set(re.findall(r'\[.+\]\:\s*?([\w\/\-\.]+\.(md|png|jpg|gif|svg))', content, flags=re.IGNORECASE)) content = replace_links(content, inline_links, md_folder, labels, docs_folder) content = replace_links(content, reference_links, md_folder, labels, docs_folder) content = process_github_md_links(content, github_md_links) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index 53c9764ba409c0..c89f45fe65148e 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -294,6 +294,10 @@ + + + + diff --git a/docs/doxygen/ngraph_cpp_api.xml b/docs/doxygen/ngraph_cpp_api.xml index ff0621b5bed70b..ec1b5ecdf3a94c 100644 --- a/docs/doxygen/ngraph_cpp_api.xml +++ b/docs/doxygen/ngraph_cpp_api.xml @@ -3,12 +3,10 @@ - - - - - - + + + + diff --git a/docs/doxygen/ngraph_py_api.xml b/docs/doxygen/ngraph_py_api.xml index 9a482af7fe17cc..a7e3cd03dadb2e 100644 --- a/docs/doxygen/ngraph_py_api.xml +++ b/docs/doxygen/ngraph_py_api.xml @@ -3,12 +3,10 @@ - - - - - - + + + + From 6bc7be05afd3f8d2e4dcfd11efa22a2c9158f917 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Tue, 6 Oct 2020 21:46:33 +0300 Subject: [PATCH 21/41] update layout --- docs/doxygen/openvino_docs.xml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 58166bb33214af..0fc6bb0ff71fb7 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -27,8 +27,7 @@ - - + From 2cb0de95d51b57d3baa57d1e227831cc839555dc Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 22:17:18 +0300 Subject: [PATCH 22/41] Added a label for nGraph_Python_API.md --- docs/nGraph_DG/nGraph_Python_API.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/nGraph_DG/nGraph_Python_API.md b/docs/nGraph_DG/nGraph_Python_API.md index 898507d70fa340..5859ff27ad031e 100644 --- a/docs/nGraph_DG/nGraph_Python_API.md +++ b/docs/nGraph_DG/nGraph_Python_API.md @@ -1,4 +1,4 @@ -# Using nGraph's Python API +# Using nGraph's Python API {#openvino_docs_nGraph_DG_PythonAPI} nGraph is the OpenVINO™ graph manipulation library, used to represent neural network models in the form of a computational graph. With nGraph Python APIs, you can create, inspect, and modify computational graphs. 
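The `doxy_md_filter.py` change in PATCH 20 above only widens the extension list of the two link-matching regular expressions so that `.svg` targets are rewritten the same way as `.md`, `.png`, `.jpg`, and `.gif` ones. As a rough illustration of what such a pattern picks up, an approximate shell equivalent (not the filter's actual Python code), run from the repository root, could be:

```sh
# List unique inline Markdown link/image targets under docs/ whose extension
# is now covered by the filter, including the newly added .svg case.
grep -rhoE '!?\[[^]]*\]\([A-Za-z0-9_./-]+\.(md|png|jpg|gif|svg)\)' docs/ | sort -u | head -n 20
```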
From f34c86f692f588e8c682b5f0a3af374f689c03db Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 22:30:32 +0300 Subject: [PATCH 23/41] fixed links --- docs/index.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/index.md b/docs/index.md index 9703030128fc7a..80de77582cea79 100644 --- a/docs/index.md +++ b/docs/index.md @@ -58,7 +58,7 @@ Useful documents for inference tuning: * [Post-Training Optimization Tool Guide](@ref pot_README) * [Deep Learning Workbench Guide](@ref workbench_docs_Workbench_DG_Introduction) * [Intel Media SDK](https://github.com/Intel-Media-SDK/MediaSDK) -* [DL Streamer Samples](IE_DG/Tools_Overview.md) +* [DL Streamer Samples](@ref gst_samples_README) * [OpenCV](https://docs.opencv.org/master/) * [OpenVX](https://software.intel.com/en-us/openvino-ovx-guide) @@ -69,12 +69,12 @@ The Intel Distribution of OpenVINO™ toolkit outputs optimized inference runtim * Intel® Neural Compute Stick 2 * Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -The Inference Engine's plug-in architecture can be extended to meet other specialized needs. [Deployment Manager](install_guides/deployment_manager_tool.md) is a Python* command-line tool that assembles the tuned model, IR files, your application, and required dependencies into a runtime package for your target device. It outputs packages for CPU, GPU, and VPU on Linux* and Windows*, and Neural Compute Stick-optimized packages with Linux. +The Inference Engine's plug-in architecture can be extended to meet other specialized needs. [Deployment Manager](./install_guides/deployment-manager-tool.md) is a Python* command-line tool that assembles the tuned model, IR files, your application, and required dependencies into a runtime package for your target device. It outputs packages for CPU, GPU, and VPU on Linux* and Windows*, and Neural Compute Stick-optimized packages with Linux. -[Inference Engine Integration Workflow](IE_DG/Integrate_with_customer_application_new_API.md) -[Inference Engine API References](./api_references.html) -[Inference Engine Plug-in Developer Guide](./ie_plugin_api/index.html) -[Deployment Manager Guide](install_guides/deployment_manager_tool.md) +* [Inference Engine Integration Workflow](IE_DG/Integrate_with_customer_application_new_API.md) +* [Inference Engine API References](./api_references.html) +* [Inference Engine Plug-in Developer Guide](./ie_plugin_api/index.html) +* [Deployment Manager Guide](./install_guides/deployment-manager-tool.md) ## OpenVINO™ Toolkit Components @@ -92,11 +92,11 @@ Intel® Distribution of OpenVINO™ toolkit includes the following components: - [Post-Training Optimization tool](@ref pot_README) - A tool to calibrate a model and then execute it in the INT8 precision - [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) - A web-based graphical environment that allows you to easily use various sophisticated OpenVINO™ toolkit components - Deep Learning Streamer (DL Streamer) – Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. DL Streamer can be installed by the Intel® Distribution of OpenVINO™ toolkit installer. Its open source version is available on [GitHub](https://github.com/opencv/gst-video-analytics). 
For the DL Streamer documentation, see: - - [DL Streamer Samples](IE_DG/Tools_Overview.md) + - [DL Streamer Samples](@ref gst_samples_README) - [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/) - [Elements](https://github.com/opencv/gst-video-analytics/wiki/Elements) - [Tutorial](https://github.com/opencv/gst-video-analytics/wiki/DL%20Streamer%20Tutorial) - [OpenCV](https://docs.opencv.org/master/) - OpenCV* community version compiled for Intel® hardware - [Intel® Media SDK](https://software.intel.com/en-us/media-sdk) (in Intel® Distribution of OpenVINO™ toolkit for Linux only) -OpenVINO™ Toolkit opensource version is available on [GitHub](https://github.com/openvinotoolkit/openvino). For building the Inference Engine from the source code, see the [building instructions](https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md). \ No newline at end of file +OpenVINO™ Toolkit opensource version is available on [GitHub](https://github.com/openvinotoolkit/openvino). For building the Inference Engine from the source code, see the build instructions. \ No newline at end of file From b86a8af5c4483f893f45b6363e2e8581794f5bbf Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 6 Oct 2020 22:38:13 +0300 Subject: [PATCH 24/41] Fixed image --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 80de77582cea79..48d0e14c2cce97 100644 --- a/docs/index.md +++ b/docs/index.md @@ -14,7 +14,7 @@ OpenVINO™ toolkit: ## OpenVINO™ Toolkit Workflow The following diagram illustrates the typical OpenVINO™ workflow (click to see the full-size image): -![](img/OV-diagram-full.png) +![](img/OpenVINO-diagram.png) ### Model Preparation, Conversion and Optimization From f224e9c53ddd43713dcebd1d52060b906bed2eed Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Wed, 7 Oct 2020 00:45:26 +0300 Subject: [PATCH 25/41] removed links to ../IE_DG/Introduction.md --- docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md | 4 +--- docs/IE_DG/PythonPackage_Overview.md | 2 +- docs/IE_DG/Samples_Overview.md | 2 +- docs/IE_DG/Tools_Overview.md | 2 +- docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md | 4 +--- docs/get_started/get_started_linux.md | 2 +- docs/get_started/get_started_macos.md | 2 +- docs/get_started/get_started_windows.md | 2 +- docs/install_guides/installing-openvino-windows.md | 2 +- 9 files changed, 9 insertions(+), 13 deletions(-) diff --git a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md index 309281119eeb61..20f415a731766e 100644 --- a/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md +++ b/docs/IE_DG/Deep_Learning_Inference_Engine_DevGuide.md @@ -42,8 +42,6 @@ inference of a pre-trained and optimized deep learning model and a set of sample ## Table of Contents -* [Introduction to Intel® Deep Learning Deployment Toolkit](Introduction.md) - * [Inference Engine API Changes History](API_Changes.md) * [Introduction to Inference Engine](inference_engine_intro.md) @@ -87,4 +85,4 @@ inference of a pre-trained and optimized deep learning model and a set of sample * [Known Issues](Known_Issues_Limitations.md) -**Typical Next Step:** [Introduction to Intel® Deep Learning Deployment Toolkit](Introduction.md) +**Typical Next Step:** [Introduction to Inference Engine](inference_engine_intro.md) diff --git a/docs/IE_DG/PythonPackage_Overview.md b/docs/IE_DG/PythonPackage_Overview.md index 411f082609f3d8..3a5704a75f5eaa 100644 --- 
a/docs/IE_DG/PythonPackage_Overview.md +++ b/docs/IE_DG/PythonPackage_Overview.md @@ -12,4 +12,4 @@ The OpenVINO™ Python\* package includes the following sub-packages: - `openvino.tools.benchmark` - Measure latency and throughput. ## See Also -* [Introduction to Intel's Deep Learning Inference Engine](Introduction.md) +* [Introduction to Inference Engine](inference_engine_intro.md) diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 2417ee9ca8ceb8..64a9462ef31457 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -183,4 +183,4 @@ sample, read the sample documentation by clicking the sample name in the samples list above. ## See Also -* [Introduction to Intel's Deep Learning Inference Engine](Introduction.md) +* [Introduction to Inference Engine](inference_engine_intro.md) diff --git a/docs/IE_DG/Tools_Overview.md b/docs/IE_DG/Tools_Overview.md index 6c543c810d0d2f..6600554785b34e 100644 --- a/docs/IE_DG/Tools_Overview.md +++ b/docs/IE_DG/Tools_Overview.md @@ -14,4 +14,4 @@ The OpenVINO™ toolkit installation includes the following tools: ## See Also -* [Introduction to Deep Learning Inference Engine](Introduction.md) +* [Introduction to Inference Engine](inference_engine_intro.md) diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md index dbfb1e7a6be7bf..0cdd936f189f10 100644 --- a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md +++ b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md @@ -63,8 +63,6 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi ## Table of Content -* [Introduction to OpenVINO™ Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) - * [Preparing and Optimizing your Trained Model with Model Optimizer](prepare_model/Prepare_Trained_Model.md) * [Configuring Model Optimizer](prepare_model/Config_Model_Optimizer.md) * [Converting a Model to Intermediate Representation (IR)](prepare_model/convert_model/Converting_Model.md) @@ -107,4 +105,4 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi * [Known Issues](Known_Issues_Limitations.md) -**Typical Next Step:** [Introduction to Intel® Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) +**Typical Next Step:** [Preparing and Optimizing your Trained Model with Model Optimizer](prepare_model/Prepare_Trained_Model.md) diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 1c3549e5f77e0e..231db418cf2de1 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -559,7 +559,7 @@ You can see all the sample application’s parameters by adding the `-h` or `--h Use these resources to learn more about the OpenVINO™ toolkit: * [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes) -* [Introduction to Intel® Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) +* [OpenVINO™ Toolkit Overview](../index.md) * [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) * [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 0c1eafc23d3ed6..7711c2628e22a0 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -522,7 +522,7 @@ You can see all the sample 
application’s parameters by adding the `-h` or `--h Use these resources to learn more about the OpenVINO™ toolkit: * [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes) -* [Introduction to Intel® Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) +* [OpenVINO™ Toolkit Overview](../index.md) * [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) * [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index b22d46c60e1747..b160199014061f 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -533,7 +533,7 @@ You can see all the sample application’s parameters by adding the `-h` or `--h Use these resources to learn more about the OpenVINO™ toolkit: * [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes) -* [Introduction to Intel® Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) +* [OpenVINO™ Toolkit Overview](../index.md) * [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) * [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 567697d9cb6b87..1d3da007f2db75 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -463,7 +463,7 @@ To learn more about converting deep learning models, go to: - [Intel Distribution of OpenVINO Toolkit home page](https://software.intel.com/en-us/openvino-toolkit) - [Intel Distribution of OpenVINO Toolkit documentation](https://software.intel.com/en-us/openvino-toolkit/documentation/featured) - [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes) -- [Introduction to Intel® Deep Learning Deployment Toolkit](../IE_DG/Introduction.md) +- [Introduction to Inference Engine](inference_engine_intro.md) - [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) - [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) - [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) From b98e8d8dce233a186897e72ee385aa4c7dec3775 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Wed, 7 Oct 2020 01:28:15 +0300 Subject: [PATCH 26/41] Removed links to tools overview page as removed --- docs/index.md | 4 ++-- docs/resources/introduction.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/index.md b/docs/index.md index 48d0e14c2cce97..b242712d51a9cb 100644 --- a/docs/index.md +++ b/docs/index.md @@ -84,10 +84,10 @@ Intel® Distribution of OpenVINO™ toolkit includes the following components: - [Deep Learning Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) - A cross-platform command-line tool for importing models and preparing them for optimal execution with the Inference Engine. The Model Optimizer imports, converts, and optimizes models, which were trained in popular frameworks, such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX*. 
- [Deep Learning Inference Engine](IE_DG/inference_engine_intro.md) - A unified API to allow high performance inference on many hardware types including Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, Intel® Vision Accelerator Design with Intel® Movidius™ vision processing unit (VPU) - [Inference Engine Samples](IE_DG/Samples_Overview.md) - A set of simple console applications demonstrating how to use the Inference Engine in your applications -- [Tools](IE_DG/Tools_Overview.md) - A set of simple console tools to work with your models +- Additional Tools - A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other - [Open Model Zoo](@ref omz_models_intel_index) - [Demos](@ref omz_demos_README) - Console applications that demonstrate how you can use the Inference Engine in your applications to solve specific use cases - - [Tools](IE_DG/Tools_Overview.md) - Additional tools to download models and check accuracy + - Additional Tools - A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other - [Documentation for Pretrained Models](@ref omz_models_intel_index) - Documentation for pretrained models that are available in the [Open Model Zoo repository](https://github.com/opencv/open_model_zoo) - [Post-Training Optimization tool](@ref pot_README) - A tool to calibrate a model and then execute it in the INT8 precision - [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) - A web-based graphical environment that allows you to easily use various sophisticated OpenVINO™ toolkit components diff --git a/docs/resources/introduction.md b/docs/resources/introduction.md index 84d53409c92c4f..6a3c4ccfaa4a28 100644 --- a/docs/resources/introduction.md +++ b/docs/resources/introduction.md @@ -4,7 +4,7 @@ ## Samples - [Inference Engine Samples](../IE_DG/Samples_Overview.md) -- [DL Streamer Samples](../IE_DG/Tools_Overview.md) +- [DL Streamer Samples](@ref gst_samples_README) ## Demos @@ -13,7 +13,7 @@ ## Additional Tools -- [Tools for models calibration and accuracy measurement](../IE_DG/Tools_Overview.md) +- A set of tools to work with your models including [Accuracy Checker utility](@ref omz_tools_accuracy_checker_README), [Post-Training Optimization Tool Guide](@ref pot_README), [Model Downloader](@ref omz_tools_downloader_README) and other ## Pre-Trained Models From 7481a299282f1d18cee0552f9090390d99aacd92 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Wed, 7 Oct 2020 01:51:45 +0300 Subject: [PATCH 27/41] some changes --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 48d0e14c2cce97..08730c7a65105a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -19,7 +19,7 @@ The following diagram illustrates the typical OpenVINO™ workflow (click to see ### Model Preparation, Conversion and Optimization You can use your framework of choice to prepare and train a Deep Learning model or just download a pretrained model from the Open Model Zoo. 
The Open Model Zoo includes Deep Learning solutions to a variety of vision problems, including object recognition, face recognition, pose estimation, text detection, and action recognition, at a range of measured complexities. -Several of these pretrained models are used also in the [code samples](E_DG/Samples_Overview.md) and [application demos](@ref omz_demos_README). To download models from the Open Model Zoo, the [Model Downloader](@ref omz_tools_downloader_README) tool is used. +Several of these pretrained models are used also in the [code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos_README). To download models from the Open Model Zoo, the [Model Downloader](@ref omz_tools_downloader_README) tool is used. One of the core component of the OpenVINO™ toolkit is the [Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) a cross-platform command-line tool that converts a trained neural network from its source framework to an open-source, nGraph-compatible [Intermediate Representation (IR)](MO_DG/IR_and_opsets.md) for use in inference operations. The Model Optimizer imports models trained in popular frameworks such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX* and performs a few optimizations to remove excess layers and group operations when possible into simpler, faster graphs. From 984ea4c774ee32f494e11650e654b5fcb64950a1 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Wed, 7 Oct 2020 01:55:35 +0300 Subject: [PATCH 28/41] Remove link to Integrate_your_kernels_into_IE.md --- docs/HOWTO/Custom_Layers_Guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md index 40700917808249..2ded4bf56691c0 100644 --- a/docs/HOWTO/Custom_Layers_Guide.md +++ b/docs/HOWTO/Custom_Layers_Guide.md @@ -195,7 +195,7 @@ For a step-by-step walk-through creating and executing a custom layer, see [Cust - Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) - OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org) - [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) -- [Kernel Extensivility in the Inference Engine Developer Guide](../IE_DG/Integrate_your_kernels_into_IE.md) +- [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md) - [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) - [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index) - [Inference Engine Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic) From a72cfc049a4d2820ee53beb71de7a04b8a777d99 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Wed, 7 Oct 2020 03:24:19 +0300 Subject: [PATCH 29/41] remove openvino_docs_IE_DG_Graph_debug_capabilities from layout as it was removed --- docs/doxygen/ie_docs.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index 8bba8c171f57d3..124fe920acb602 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -240,7 +240,6 @@ - From cb4ff59e9d60c4a59c58106dd9992e75ff380ec2 Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Thu, 8 Oct 2020 00:20:50 +0300 Subject: [PATCH 30/41] update layouts --- docs/doxygen/ie_py_api.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/doxygen/ie_py_api.xml 
b/docs/doxygen/ie_py_api.xml index 251bba9211bec0..1bf91bbbe0d1a0 100644 --- a/docs/doxygen/ie_py_api.xml +++ b/docs/doxygen/ie_py_api.xml @@ -28,9 +28,9 @@ - + - + From 7fc1e9d7f17486702c2666447e2bf82b041232de Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Thu, 8 Oct 2020 19:23:01 +0300 Subject: [PATCH 31/41] Post-release fixes and installation path changes --- docs/benchmarks/performance_benchmarks_faq.md | 11 ++--- docs/get_started/get_started_linux.md | 46 +++++++++++-------- docs/get_started/get_started_macos.md | 40 ++++++++-------- docs/get_started/get_started_windows.md | 6 +-- docs/img/openvino-install-windows-01.png | 4 +- docs/img/openvino-install-windows-02.png | 4 +- docs/img/openvino-install-windows-03.png | 4 +- .../installing-openvino-linux.md | 32 ++++++------- .../installing-openvino-macos.md | 18 ++++---- .../installing-openvino-windows.md | 36 +++++++-------- 10 files changed, 103 insertions(+), 98 deletions(-) diff --git a/docs/benchmarks/performance_benchmarks_faq.md b/docs/benchmarks/performance_benchmarks_faq.md index 94ea536e0a337f..67aa1bb2972ae8 100644 --- a/docs/benchmarks/performance_benchmarks_faq.md +++ b/docs/benchmarks/performance_benchmarks_faq.md @@ -51,19 +51,16 @@ We published a set of guidelines and recommendations to optimize your models ava #### 9. Why are INT8 optimized models used for benchmarking on CPUs with no VNNI support? The benefit of low-precision optimization using the OpenVINO™ toolkit model optimizer extends beyond processors supporting VNNI through Intel® DL Boost. The reduced bit width of INT8 compared to FP32 allows Intel® CPU to process the data faster and thus offers better throughput on any converted model agnostic of the intrinsically supported low-precision optimizations within Intel® hardware. Please refer to [INT8 vs. FP32 Comparison on Select Networks and Platforms](./performance_int8_vs_fp32.html) for comparison on boost factors for different network models and a selection of Intel® CPU architectures, including AVX-2 with Intel® Core™ i7-8700T, and AVX-512 (VNNI) with Intel® Xeon® 5218T and Intel® Xeon® 8270. -#### 10. Previous releases included benchmarks on googlenet-v1. Why is there no longer benchmarks on this neural network model? -We replaced googlenet-v1 to [resnet-18-pytorch](https://github.com/opencv/open_model_zoo/blob/master/models/public/resnet-18-pytorch/resnet-18-pytorch.md) due to changes in developer usage. The public model resnet-18 is used by many developers as an Image Classification model. This pre-optimized model was also trained on the ImageNet database, similar to googlenet-v1. Both googlenet-v1 and resnet-18 will remain part of the Open Model Zoo. Developers are encouraged to utilize resnet-18-pytorch for Image Classification use cases. - -#### 11. Previous releases included benchmarks on googlenet-v1-CF (Caffe). Why is there no longer benchmarks on this neural network model? +#### 10. Previous releases included benchmarks on googlenet-v1-CF (Caffe). Why is there no longer benchmarks on this neural network model? We replaced googlenet-v1-CF to resnet-18-pytorch due to changes in developer usage. The public model resnet-18 is used by many developers as an Image Classification model. This pre-optimized model was also trained on the ImageNet database, similar to googlenet-v1-CF. Both googlenet-v1-CF and resnet-18 will remain part of the Open Model Zoo. Developers are encouraged to utilize resnet-18-pytorch for Image Classification use cases. -#### 12. 
Why have resnet-50-CF, mobilenet-v1-1.0-224-CF, mobilenet-v2-CF and resnet-101-CF been removed? +#### 11. Why have resnet-50-CF, mobilenet-v1-1.0-224-CF, mobilenet-v2-CF and resnet-101-CF been removed? The CAFFE version of resnet-50, mobilenet-v1-1.0-224 and mobilenet-v2 have been replaced with their TensorFlow and PyTorch counterparts. Resnet-50-CF is replaced by resnet-50-TF, mobilenet-v1-1.0-224-CF is replaced by mobilenet-v1-1.0-224-TF and mobilenet-v2-CF is replaced by mobilenetv2-PyTorch. Resnet-50-CF an resnet-101-CF are no longer maintained at their public source repos. -#### 13. Where can I search for OpenVINO™ performance results based on HW-platforms? +#### 12. Where can I search for OpenVINO™ performance results based on HW-platforms? The web site format has changed in order to support the more common search approach of looking for the performance of a given neural network model on different HW-platforms. As opposed to review a given HW-platform's performance on different neural network models. -#### 14. How is Latency measured? +#### 13. How is Latency measured? Latency is measured by running the OpenVINO™ inference engine in synchronous mode. In synchronous mode each frame or image is processed through the entire set of stages (pre-processing, inference, post-processing) before the next frame or image is processed. This KPI is relevant for applications where the inference on a single image is required, for example the analysis of an ultra sound image in a medical application or the analysis of a seismic image in the oil & gas industry. Other use cases include real-time or near real-time applications like an industrial robot's response to changes in its environment and obstacle avoidance for autonomous vehicles where a quick response to the result of the inference is required. \htmlonly diff --git a/docs/get_started/get_started_linux.md b/docs/get_started/get_started_linux.md index 231db418cf2de1..1483815a61f8e1 100644 --- a/docs/get_started/get_started_linux.md +++ b/docs/get_started/get_started_linux.md @@ -23,9 +23,15 @@ In addition, demo scripts, code samples and demo applications are provided to he ## Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Linux*](../install_guides/installing-openvino-linux.md). -By default, the installation directory is `/opt/intel/openvino`, but the installation gave you the option to use the directory of your choice. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` with the directory in which you installed the software. +By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as ``: +* For root or administrator: `/opt/intel/openvino_/` +* For regular users: `/home//intel/openvino_/` -The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino/deployment_tools` directory. +For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/` + +If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. 
+ +The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/deployment_tools` directory.
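Given the layout introduced above, the versioned installation directory and the `openvino_2021` convenience symlink can be verified before sourcing the environment. This is an illustrative check rather than part of the patch; the paths assume a default root installation (for a per-user install, use the `~/intel` prefix instead):

```sh
# The versioned install directory (openvino_ followed by the release version)
ls -d /opt/intel/openvino_2021.*

# The symbolic link that always points at the latest installed version
ls -l /opt/intel/openvino_2021

# Set up the environment from the symlinked (latest) installation
source /opt/intel/openvino_2021/bin/setupvars.sh
```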
Click for the Intel® Distribution of OpenVINO™ toolkit directory structure @@ -57,7 +63,7 @@ The simplified OpenVINO™ workflow is: ## Use the Demo Scripts to Learn the Workflow -The demo scripts in `/opt/intel/openvino/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: +The demo scripts in `/opt/intel/openvino_2021/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to: * Compile several samples from the source files delivered as part of the OpenVINO toolkit. * Download trained models. * Perform pipeline steps and see the output on the console. @@ -221,7 +227,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -325,7 +331,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -338,7 +344,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -346,9 +352,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels ```
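Before moving on to inference, it can help to confirm that the conversion and label-copy steps above produced the expected files; a small illustrative check (not part of the patch), using the example directories from this guide:

```sh
# The Model Optimizer output plus the copied ImageNet class labels:
# squeezenet1.1.xml (topology), squeezenet1.1.bin (weights), squeezenet1.1.labels
ls -l ~/models/public/squeezenet1.1/ir/
```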
@@ -359,8 +365,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino/deployment_tools/demo/car.png` -* `/opt/intel/openvino/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` +* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -370,7 +376,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -383,32 +389,32 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU ``` **MYRIAD:** > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` **HDDL:** > **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: @@ -449,7 +455,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. 
Go to the demo application build directory: ```sh @@ -466,14 +472,14 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **GPU:** > **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md). ```sh -./security_barrier_camera_demo -i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m /vehicle-license-plate-detection-barrier-0106.xml -m_va /vehicle-attributes-recognition-barrier-0039.xml -m_lpr /license-plate-recognition-barrier-0001.xml -d GPU ``` **MYRIAD:** @@ -498,7 +504,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino/bin/setupvars.sh +source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_macos.md b/docs/get_started/get_started_macos.md index 7711c2628e22a0..c36d7b58270b45 100644 --- a/docs/get_started/get_started_macos.md +++ b/docs/get_started/get_started_macos.md @@ -24,10 +24,12 @@ In addition, demo scripts, code samples and demo applications are provided to he This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for macOS*](../install_guides/installing-openvino-macos.md). By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as ``: -* For root or administrator: `/opt/intel/openvino/` -* For regular users: `/home//intel/openvino/` +* For root or administrator: `/opt/intel/openvino_/` +* For regular users: `/home//intel/openvino_/` -If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. 
+For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/`. + +If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home//` with the directory in which you installed the software. The primary tools for deploying your models and applications are installed to the `/deployment_tools` directory.
@@ -105,7 +107,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image /opt/intel/openvino/deployment_tools/demo/car.png +Image /opt/intel/openvino_2021/deployment_tools/demo/car.png classid probability label ------- ----------- ----- @@ -216,7 +218,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one * **List the models available in the downloader**: ```sh -cd /opt/intel/openvino/deployment_tools/tools/model_downloader/ +cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/ ``` ```sh python3 info_dumper.py --print_all @@ -321,7 +323,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit 3. Run the Model Optimizer script: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer ``` ```sh python3 ./mo.py --input_model / --data_type --output_dir @@ -334,7 +336,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer ``` ```sh python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir @@ -342,9 +344,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory. -Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: +Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to ``. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers: ```sh - cp /opt/intel/openvino/deployment_tools/demo/squeezenet1.1.labels + cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels ```
@@ -355,8 +357,8 @@ Many sources are available from which you can download video media to use the co - https://images.google.com As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications: -* `/opt/intel/openvino/deployment_tools/demo/car.png` -* `/opt/intel/openvino/deployment_tools/demo/car_1.bmp` +* `/opt/intel/openvino_2021/deployment_tools/demo/car.png` +* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp` ### Step 4: Run the Image Classification Code Sample @@ -366,7 +368,7 @@ To run the **Image Classification** code sample with an input image on the IR: 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Go to the code samples build directory: ```sh @@ -379,11 +381,11 @@ To run the **Image Classification** code sample with an input image on the IR:
Click for examples of running the Image Classification code sample on different devices -The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: +The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices: **CPU:** ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU ``` @@ -391,14 +393,14 @@ The following commands run the Image Classification Code Sample using the `car.p > **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md). ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD ``` When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU: ```sh Top 10 results: -Image /opt/intel/openvino/deployment_tools/demo/car.png +Image /opt/intel/openvino_2021/deployment_tools/demo/car.png classid probability label ------- ----------- ----- @@ -426,7 +428,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on 1. Set up the OpenVINO environment variables: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Go to the demo application build directory: ```sh @@ -443,7 +445,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on **CPU:** ```sh -./security_barrier_camera_demo -i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU +./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU ``` **MYRIAD:** @@ -461,7 +463,7 @@ Following are some basic guidelines for executing the OpenVINO™ workflow using 1. 
Before using the OpenVINO™ samples, always set up the environment: ```sh -source /opt/intel/openvino/bin/setupvars.sh +source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Have the directory path for the following: - Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release` diff --git a/docs/get_started/get_started_windows.md b/docs/get_started/get_started_windows.md index b160199014061f..f360c80f12b0fd 100644 --- a/docs/get_started/get_started_windows.md +++ b/docs/get_started/get_started_windows.md @@ -24,7 +24,7 @@ In addition, demo scripts, code samples and demo applications are provided to he ## Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Windows*](../install_guides/installing-openvino-windows.md). -By default, the installation directory is `C:\Program Files (x86)\IntelSWTools\openvino`, referred to as ``. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\IntelSWTools` with the directory in which you installed the software. +By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_`, referred to as ``. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. The primary tools for deploying your models and applications are installed to the `\deployment_tools` directory.
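Once `setupvars.bat` has been run in a Command Prompt, the active install can also be checked from Python. The snippet below is only a sketch and assumes the batch file exports an `INTEL_OPENVINO_DIR` variable, which may vary between releases.

```python
import os

# Illustrative check, run in the same Command Prompt where setupvars.bat was executed.
install_dir = os.environ.get("INTEL_OPENVINO_DIR")
if install_dir:
    print("OpenVINO environment is set:", install_dir)
else:
    print("Environment not set; run setupvars.bat first")
```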
@@ -106,7 +106,7 @@ When the script completes, you see the label and confidence for the top-10 categ Top 10 results: -Image C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png classid probability label ------- ----------- ----- @@ -403,7 +403,7 @@ When the Sample Application completes, you see the label and confidence for the ```bat Top 10 results: -Image C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png +Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png classid probability label ------- ----------- ----- diff --git a/docs/img/openvino-install-windows-01.png b/docs/img/openvino-install-windows-01.png index 569052995c5fa8..666d39b3912630 100644 --- a/docs/img/openvino-install-windows-01.png +++ b/docs/img/openvino-install-windows-01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de85bd59edc66bfd37aab395bc7e2dde2988f16c7ff263153d382bfcbeb9ff2e -size 35998 +oid sha256:e2a218afd50f8112f94c032439f69992abb54a551566ab8b4734405d6332499d +size 32796 diff --git a/docs/img/openvino-install-windows-02.png b/docs/img/openvino-install-windows-02.png index b83cf8472c6acc..52e1a2328363aa 100644 --- a/docs/img/openvino-install-windows-02.png +++ b/docs/img/openvino-install-windows-02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b2586ce56ff1a5c0527b53dc21aa09b489c11e24fec82c6a58e2db860a772c4 -size 39720 +oid sha256:4ad93452fb1020baa7b5de0eb859bdb89e609a4ba8eb382baafb75b3194080ff +size 47318 diff --git a/docs/img/openvino-install-windows-03.png b/docs/img/openvino-install-windows-03.png index a96a2e17eab254..80cad99a9bf514 100644 --- a/docs/img/openvino-install-windows-03.png +++ b/docs/img/openvino-install-windows-03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3faa8b02a8477b5d764ea2d47502bc0a878087614e0516704cc1525b5b60dedb -size 26412 +oid sha256:bfdd8e4dcc4d7acd4a1003e7a3d933f17ac05761c5b2e4ebedd23e7f8242d5db +size 26598 diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index ee50e397966246..9862eacd6e690a 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -128,7 +128,7 @@ looks like this: ![](../img/openvino-install-linux-03.png) When installed as **root** the default installation directory for the Intel Distribution of OpenVINO is `/opt/intel/openvino_/`.
- For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino/`. + For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2021/`. > **NOTE**: The Intel® Media SDK component is always installed in the `/opt/intel/mediasdk` directory regardless of the OpenVINO installation path chosen. 8. A Complete screen indicates that the core components have been installed: @@ -149,7 +149,7 @@ These dependencies are required for: 1. Change to the `install_dependencies` directory: ```sh -cd /opt/intel/openvino/install_dependencies +cd /opt/intel/openvino_2021/install_dependencies ``` 2. Run a script to download and install the external software dependencies: ```sh @@ -162,7 +162,7 @@ The dependencies are installed. Continue to the next section to set your environ You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables: ```sh -source /opt/intel/openvino/bin/setupvars.sh +source /opt/intel/openvino_2021/bin/setupvars.sh ``` **Optional:** The OpenVINO environment variables are removed when you close the shell. As an option, you can permanently set the environment variables as follows: @@ -174,7 +174,7 @@ vi /.bashrc 2. Add this line to the end of the file: ```sh -source /opt/intel/openvino/bin/setupvars.sh +source /opt/intel/openvino_2021/bin/setupvars.sh ``` 3. Save and close the file: press the **Esc** key and type `:wq`. @@ -210,7 +210,7 @@ You can choose to either configure all supported frameworks at once **OR** confi 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -224,7 +224,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh -cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites +cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: @@ -271,7 +271,7 @@ To verify the installation and compile two samples, use the steps below to run t 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino/deployment_tools/demo +cd /opt/intel/openvino_2021/deployment_tools/demo ``` 2. Run the **Image Classification verification script**: @@ -296,7 +296,7 @@ This script downloads three pre-trained model IRs, builds the [Security Barrier 4. Close the image viewer window to complete the verification script. -To learn about the verification scripts, see the `README.txt` file in `/opt/intel/openvino/deployment_tools/demo`. +To learn about the verification scripts, see the `README.txt` file in `/opt/intel/openvino_2021/deployment_tools/demo`. For a description of the Intel Distribution of OpenVINO™ pre-trained object detection and object recognition models, see [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index). @@ -312,7 +312,7 @@ The steps in this section are required only if you want to enable the toolkit co 1. Go to the install_dependencies directory: ```sh -cd /opt/intel/openvino/install_dependencies/ +cd /opt/intel/openvino_2021/install_dependencies/ ``` 2. 
Enter the super user mode: ```sh @@ -350,7 +350,7 @@ Log out and log in for it to take effect. 2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows: ```sh -sudo cp /opt/intel/openvino/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ +sudo cp /opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ ``` ```sh sudo udevadm control --reload-rules @@ -373,7 +373,7 @@ After configuration is done, you are ready to run the verification scripts with 1. Go to the **Inference Engine demo** directory: ```sh -cd /opt/intel/openvino/deployment_tools/demo +cd /opt/intel/openvino_2021/deployment_tools/demo ``` 2. Run the **Image Classification verification script**. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. @@ -403,7 +403,7 @@ To run the sample application: 1. Set up environment variables: ```sh -source /opt/intel/openvino/bin/setupvars.sh +source /opt/intel/openvino_2021/bin/setupvars.sh ``` 2. Go to the samples build directory: ```sh @@ -414,24 +414,24 @@ cd ~/inference_engine_samples_build/intel64/Release - **For CPU**: ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d CPU + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d CPU ``` - **For GPU**: ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d GPU + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d GPU ``` - **For MYRIAD**: > **NOTE**: Running inference on Intel® Neural Compute Stick 2 with the MYRIAD plugin requires performing [additional hardware configuration steps](#additional-NCS-steps). ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d MYRIAD + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d MYRIAD ``` - **For HDDL**: > **NOTE**: Running inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs with the HDDL plugin requires performing [additional hardware configuration steps](installing-openvino-linux-ivad-vpu.md) ```sh - ./classification_sample_async -i /opt/intel/openvino/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d HDDL + ./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml -d HDDL ``` For information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). 
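The same verification can be done programmatically through the Inference Engine Python API. The sketch below is not one of the guide's steps; it assumes `setupvars.sh` has been sourced in the current shell, that the demo scripts already produced the SqueezeNet IR at the path used in the commands above, and that API names match this release.

```python
import os
from openvino.inference_engine import IECore

# List the inference devices visible to the Inference Engine (CPU, GPU, MYRIAD, HDDL, ...).
ie = IECore()
print("Available devices:", ie.available_devices)

# Optionally load the SqueezeNet IR produced by the demo scripts, if it exists.
model_xml = os.path.expanduser(
    "~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.xml"
)
model_bin = model_xml.replace(".xml", ".bin")
if os.path.exists(model_xml):
    net = ie.read_network(model=model_xml, weights=model_bin)
    print("Loaded network:", net.name)
```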
diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 15a385ec526bde..400d255e9d81f6 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -107,11 +107,11 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom - If you used **root** or **administrator** privileges to run the installer, it installs the OpenVINO toolkit to `/opt/intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino/` + For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2021/` - If you used **regular user** privileges to run the installer, it installs the OpenVINO toolkit to `/home//intel/openvino_/` - For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino/` + For simplicity, a symbolic link to the latest installation is also created: `/home//intel/openvino_2021/` 9. If needed, click **Customize** to change the installation directory or the components you want to install: ![](../img/openvino-install-macos-04.png) @@ -131,7 +131,7 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_` and autom You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` Optional: The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows: @@ -144,7 +144,7 @@ You need to update several environment variables before you can compile and run 3. Add this line to the end of the file: ```sh - source /opt/intel/openvino/bin/setupvars.sh + source /opt/intel/openvino_2021/bin/setupvars.sh ``` 3. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key. @@ -178,7 +178,7 @@ You can choose to either configure the Model Optimizer for all supported framewo 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites ``` 2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX: @@ -192,7 +192,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti 1. Go to the Model Optimizer prerequisites directory: ```sh - cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites + cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites ``` 2. Run the script for your model framework. You can run more than one script: @@ -243,7 +243,7 @@ To verify the installation and compile two Inference Engine samples, run the ver 1. Go to the **Inference Engine demo** directory: ```sh - cd /opt/intel/openvino/deployment_tools/demo + cd /opt/intel/openvino_2021/deployment_tools/demo ``` 2. Run the **Image Classification verification script**: @@ -263,7 +263,7 @@ This script is complete. 
Continue to the next section to run the Inference Pipel ### Run the Inference Pipeline Verification Script -While still in `/opt/intel/openvino/deployment_tools/demo/`, run the Inference Pipeline verification script: +While still in `/opt/intel/openvino_2021/deployment_tools/demo/`, run the Inference Pipeline verification script: ```sh ./demo_security_barrier_camera.sh ``` @@ -299,7 +299,7 @@ Visit the Intel Distribution of OpenVINO Toolkit [Inference Tutorials for Face D ## Additional Resources -- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino/deployment_tools/demo/`. +- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/deployment_tools/demo/`. - For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_intel_index) page. diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 1d3da007f2db75..0cf4408f40f5b4 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -99,7 +99,7 @@ The following components are installed by default: 1. If you have not downloaded the Intel® Distribution of OpenVINO™ toolkit, [download the latest version](http://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-windows). By default, the file is saved to the `Downloads` directory as `w_openvino_toolkit_p_.exe`. -2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_p_.exe`. A window opens to let you choose your installation directory and components. The default installation directory is `C:\Program Files (x86)\IntelSWTools\openvino_`, for simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\IntelSWTools\openvino`. If you choose a different installation directory, the installer will create the directory for you: +2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_p_.exe`. A window opens to let you choose your installation directory and components. The default installation directory is `C:\Program Files (x86)\Intel\openvino_`, for simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. If you choose a different installation directory, the installer will create the directory for you: ![](../img/openvino-install-windows-01.png) @@ -124,11 +124,11 @@ The screen example below indicates you are missing two dependencies: ### Set the Environment Variables -> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ to the non-default install directory, replace `C:\Program Files (x86)\IntelSWTools` with the directory in which you installed the software. +> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ to the non-default install directory, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. You must update several environment variables before you can compile and run OpenVINO™ applications. 
Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables:
```sh
-cd C:\Program Files (x86)\IntelSWTools\openvino\bin\
+cd C:\Program Files (x86)\Intel\openvino_2021\bin\
```

```sh
@@ -152,7 +152,7 @@ The Model Optimizer is a key component of the Intel® Distribution of OpenVINO

The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware.

-The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
+The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.

This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page.

@@ -167,8 +167,8 @@ You can configure the Model Optimizer either for all supported frameworks at onc

> **NOTE**:
> In the steps below:
-> - If you you want to use the Model Optimizer from another installed versions of Intel® Distribution of OpenVINO™ toolkit installed, replace `openvino` with `openvino_`.
-> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\IntelSWTools` with the directory in which you installed the software.
+> - If you want to use the Model Optimizer from another installed version of the Intel® Distribution of OpenVINO™ toolkit, replace `openvino_2021` with `openvino_`, where `` is the required version.
+> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\Intel` with the directory where you installed the software.

These steps use a command prompt to make sure you see error messages.

@@ -181,7 +181,7 @@ Type commands in the opened window:

2. Go to the Model Optimizer prerequisites directory.
```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites ``` 3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:
@@ -193,7 +193,7 @@ install_prerequisites.bat 1. Go to the Model Optimizer prerequisites directory:
```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\install_prerequisites +cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites ``` 2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one: @@ -242,14 +242,14 @@ If you want to use a GPU or VPU, or update your Windows* environment variables, > **IMPORTANT**: This section is required. In addition to confirming your installation was successful, demo scripts perform other steps, such as setting up your computer to use the Inference Engine samples. > **NOTE**: -> The paths in this section assume you used the default installation directory. If you used a directory other than `C:\Program Files (x86)\IntelSWTools`, update the directory with the location where you installed the software. +> The paths in this section assume you used the default installation directory. If you used a directory other than `C:\Program Files (x86)\Intel`, update the directory with the location where you installed the software. To verify the installation and compile two samples, run the verification applications provided with the product on the CPU: 1. Open a command prompt window. 2. Go to the Inference Engine demo directory:
```sh - cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\ + cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\ ``` 3. Run the verification scripts by following the instructions in the next section. @@ -291,7 +291,7 @@ When the demo completes, you have two windows open: Close the image viewer window to end the demo. -To learn more about the verification scripts, see `README.txt` in `C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo`. +To learn more about the verification scripts, see `README.txt` in `C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo`. For detailed description of the OpenVINO™ pre-trained object detection and object recognition models, see the [Overview of OpenVINO™ toolkit Pre-Trained Models](@ref omz_models_intel_index) page. @@ -358,7 +358,7 @@ After configuration is done, you are ready to run the verification scripts with 2. Go to the Inference Engine demo directory: ```sh - cd C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\ + cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\ ``` 3. Run the Image Classification verification script. If you have access to the Internet through the proxy server only, please make sure that it is configured in your environment. ```sh @@ -405,13 +405,13 @@ Image Classification sample application binary file was automatically built and The Image Classification sample application binary file located in the `C:\Users\\Documents\Intel\OpenVINO\inference_engine_samples_build\intel64\Release\` directory. The Caffe* Squeezenet model IR files (`.bin` and `.xml`) are located in the in the `C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\` directory. -> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\IntelSWTools` with the directory where you installed the software. +> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\Intel` with the directory where you installed the software. To run the sample application: 1. Set up environment variables: ```sh -cd C:\Program Files (x86)\IntelSWTools\openvino\bin\setupvars.bat +cd C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat ``` 2. 
Go to the samples build directory: ```sh @@ -422,22 +422,22 @@ cd C:\Users\\Documents\Intel\OpenVINO\inference_engine_samples_build\i - For CPU: ```sh - classification_sample_async.exe -i "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d CPU + classification_sample_async.exe -i "C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d CPU ``` - For GPU: ```sh - classification_sample_async.exe -i "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d GPU + classification_sample_async.exe -i "C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d GPU ``` - For VPU (Intel® Neural Compute Stick 2): ```sh - classification_sample_async.exe -i "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d MYRIAD + classification_sample_async.exe -i "C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d MYRIAD ``` - For VPU (Intel® Vision Accelerator Design with Intel® Movidius™ VPUs): ```sh - classification_sample_async.exe -i "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d HDDL + classification_sample_async.exe -i "C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png" -m "C:\Users\\Documents\Intel\OpenVINO\openvino_models\ir\public\squeezenet1.1\FP16\squeezenet1.1.xml" -d HDDL ``` For information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). From a953fedcb8a22f1dc4d6a463571e6f570dbf9603 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 9 Oct 2020 17:42:09 +0300 Subject: [PATCH 32/41] Added PIP installation and Build from Source to the layout --- docs/doxygen/openvino_docs.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 0fc6bb0ff71fb7..cbeea276f69452 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -28,6 +28,8 @@ + + From c38c987fce61dbf72cbd4ecd55234a0af128a8fa Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 9 Oct 2020 18:10:49 +0300 Subject: [PATCH 33/41] Fixed formatting issue, removed broken link --- docs/install_guides/installing-openvino-pip.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/install_guides/installing-openvino-pip.md b/docs/install_guides/installing-openvino-pip.md index dbb7c6cf4cc186..520096e978c4e4 100644 --- a/docs/install_guides/installing-openvino-pip.md +++ b/docs/install_guides/installing-openvino-pip.md @@ -23,12 +23,12 @@ This guide provides installation steps for the Intel® distribution of OpenVINO ``` 3. Add PATH to environment variables. 
- - Ubuntu* 18.04 and macOS*: + - Ubuntu* 18.04 and macOS*: ```sh export LD_LIBRARY_PATH=/lib:${LD_LIBRARY_PATH} ``` - - Windows* 10: - ```sh + - Windows* 10: + ```sh set PATH=/Library/bin;%PATH% ``` 4. Verify that the package is installed: @@ -46,6 +46,5 @@ Now you are ready to develop and run your application. - [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). - [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md). - For more information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). -- For information on Inference Engine Tutorials, see the [Inference Tutorials](https://github.com/intel-iot-devkit/inference-tutorials-generic). - [Intel® Distribution of OpenVINO™ toolkit PIP home page](https://pypi.org/project/openvino-python/) From fd0c1963b41c706f4f15c1b1a62f6af0e7adcc2c Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Fri, 9 Oct 2020 18:54:23 +0300 Subject: [PATCH 34/41] Renamed section EXAMPLES to RESOURCES according to review comments --- docs/doxygen/openvino_docs.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index cbeea276f69452..55247f36ee73b5 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -85,8 +85,8 @@ - - + + From 541ca0adc04695b9ef34189a519a6ac48fbf6bfe Mon Sep 17 00:00:00 2001 From: Nikolay Tyukaev Date: Fri, 9 Oct 2020 21:42:40 +0300 Subject: [PATCH 35/41] add mo faq navigation by url param --- docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md index 4dc93936126978..f04d413bd1aab1 100644 --- a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md +++ b/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md @@ -615,3 +615,16 @@ You need to specify values for each input of the model. For more information, re #### 102. What does the message "Operation _contrib_box_nms is not supported ..." mean? It means that you trying to convert the topology which contains '_contrib_box_nms' operation which is not supported directly. However the sub-graph of operations including the '_contrib_box_nms' could be replaced with DetectionOutput layer if your topology is one of the gluoncv topologies. Specify '--enable_ssd_gluoncv' command line parameter for the Model Optimizer to enable this transformation. + +\htmlonly + + + +\endhtmlonly \ No newline at end of file From 4e65b757ddd15ac324884edf5b9e22b49fdb73f2 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Mon, 12 Oct 2020 12:48:09 +0300 Subject: [PATCH 36/41] Removed DLDT description --- docs/install_guides/installing-openvino-macos.md | 11 +++++------ docs/install_guides/installing-openvino-windows.md | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 400d255e9d81f6..82f488981f5c39 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -1,22 +1,21 @@ # Install Intel® Distribution of OpenVINO™ toolkit for macOS* {#openvino_docs_install_guides_installing_openvino_macos} > **NOTES**: -> - The Intel® Distribution of OpenVINO™ is supported on macOS\* 10.14.x versions. -> - This installation has been validated on macOS 10.14.4. 
+> - The Intel® Distribution of OpenVINO™ is supported on macOS\* 10.15.x versions. > - An internet connection is required to follow the steps in this guide. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment. ## Introduction The Intel® Distribution of OpenVINO™ toolkit quickly deploys applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNN), the toolkit extends computer vision (CV) workloads across Intel® hardware, maximizing performance. -The Intel® Distribution of OpenVINO™ toolkit for macOS* includes the Intel® Deep Learning Deployment Toolkit (Intel® DLDT) and OpenCV* to deploy applications for accelerated inference on Intel® CPUs. +The Intel® Distribution of OpenVINO™ toolkit for macOS* includes the Inference Engine, OpenCV* libraries and Model Optimizer tool to deploy applications for accelerated inference on Intel® CPUs and Intel® Neural Compute Stick 2. The Intel® Distribution of OpenVINO™ toolkit for macOS*: -- Enables CNN-based deep learning inference on the edge +- Enables CNN-based deep learning inference on the edge - Supports heterogeneous execution across Intel® CPU and Intel® Neural Compute Stick 2 with Intel® Movidius™ VPUs -- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels -- Includes optimized calls for computer vision standards including OpenCV\* +- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels +- Includes optimized calls for computer vision standards including OpenCV\* **Included with the Installation** diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 0cf4408f40f5b4..269c3b057e05e4 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -38,9 +38,9 @@ Your installation is complete when these are all completed: ### About the Intel® Distribution of OpenVINO™ toolkit -The Intel® Distribution of OpenVINO™ toolkit speeds the deployment of applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNN), the toolkit extends computer vision (CV) workloads across Intel® hardware to maximize performance. +OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance, AI and deep learning inference deployed from edge to cloud. -The Intel® Distribution of OpenVINO™ toolkit includes the Intel® Deep Learning Deployment Toolkit (Intel® DLDT). For more information, see the online [Intel® Distribution of OpenVINO™ toolkit Overview](https://software.intel.com/en-us/OpenVINO-toolkit) page. +For more information, see the online [Intel® Distribution of OpenVINO™ toolkit Overview](https://software.intel.com/en-us/OpenVINO-toolkit) page. 
The Intel® Distribution of OpenVINO™ toolkit for Windows\* 10 OS: From c96188918b6ea8d6cf51a6b1404dfa9a09ee4fe6 Mon Sep 17 00:00:00 2001 From: aalborov Date: Mon, 12 Oct 2020 20:39:34 +0300 Subject: [PATCH 37/41] Replaced wrong links --- docs/IE_DG/supported_plugins/MYRIAD.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/IE_DG/supported_plugins/MYRIAD.md b/docs/IE_DG/supported_plugins/MYRIAD.md index 5fbee431ee1c92..5ae032d4a245ec 100644 --- a/docs/IE_DG/supported_plugins/MYRIAD.md +++ b/docs/IE_DG/supported_plugins/MYRIAD.md @@ -6,11 +6,12 @@ The Inference Engine MYRIAD plugin is developed for inference of neural networks ## Installation on Linux* OS -For installation instructions, refer to the [Installation Guide for Linux*](../../../inference-engine/samples/benchmark_app/README.md). +For installation instructions, refer to the [Installation Guide for Linux*](../install_guides/installing-openvino-linux.md). + ## Installation on Windows* OS -For installation instructions, refer to the [Installation Guide for Windows*](../../../inference-engine/samples/benchmark_app/README.md). +For installation instructions, refer to the [Installation Guide for Windows*](../install_guides/installing-openvino-windows.md). ## Supported networks From 6aa0d2b95376e176d67e04ee8ff198d5efa02278 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Mon, 12 Oct 2020 20:51:29 +0300 Subject: [PATCH 38/41] MInor fix for path to the cpp samples --- docs/install_guides/installing-openvino-raspbian.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 28d620d8a1c5f2..4037e765dc96c5 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -144,7 +144,7 @@ mkdir build && cd build 2. Build the Object Detection Sample: ```sh -cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino/deployment_tools/inference_engine/samples*/cpp* +cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino/deployment_tools/inference_engine/samples/cpp ``` ```sh make -j2 object_detection_sample_ssd From 1a0fa4114a4490ca42e5736dc3c176b117cb3fc9 Mon Sep 17 00:00:00 2001 From: aalborov Date: Mon, 12 Oct 2020 21:00:13 +0300 Subject: [PATCH 39/41] fixes --- docs/IE_DG/supported_plugins/MYRIAD.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/IE_DG/supported_plugins/MYRIAD.md b/docs/IE_DG/supported_plugins/MYRIAD.md index 5ae032d4a245ec..3b1c3ec018c3c0 100644 --- a/docs/IE_DG/supported_plugins/MYRIAD.md +++ b/docs/IE_DG/supported_plugins/MYRIAD.md @@ -6,12 +6,12 @@ The Inference Engine MYRIAD plugin is developed for inference of neural networks ## Installation on Linux* OS -For installation instructions, refer to the [Installation Guide for Linux*](../install_guides/installing-openvino-linux.md). +For installation instructions, refer to the [Installation Guide for Linux*](../../install_guides/installing-openvino-linux.md). ## Installation on Windows* OS -For installation instructions, refer to the [Installation Guide for Windows*](../install_guides/installing-openvino-windows.md). +For installation instructions, refer to the [Installation Guide for Windows*](../../install_guides/installing-openvino-windows.md). 
## Supported networks From 8c7db7daf563ac87f02775c03ce227a4f11faf31 Mon Sep 17 00:00:00 2001 From: Andrey Zaytsev Date: Tue, 13 Oct 2020 17:08:06 +0300 Subject: [PATCH 40/41] Update ops.py --- ngraph/python/src/ngraph/opset1/ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/ngraph/python/src/ngraph/opset1/ops.py index 9ee2ab51ee9ab8..6735e8be336d33 100644 --- a/ngraph/python/src/ngraph/opset1/ops.py +++ b/ngraph/python/src/ngraph/opset1/ops.py @@ -806,7 +806,7 @@ def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node Computes exponential linear: alpha * (exp(data) - 1) if < 0, data otherwise. For more information refer to: - [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/abs/1511.07289) + [Fast and Accurate Deep Network Learning by Exponential Linear Units](http://arxiv.org/abs/1511.07289) @param data: Input tensor. One of: input node, array or scalar. @param alpha: Scalar multiplier for negative values. From 8105d882697bba5eca9e1587c07a1b2e114fb3b9 Mon Sep 17 00:00:00 2001 From: Rafal Blaczkowski Date: Wed, 14 Oct 2020 08:46:35 +0200 Subject: [PATCH 41/41] Fix style --- ngraph/python/src/ngraph/opset1/ops.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/ngraph/python/src/ngraph/opset1/ops.py index 6735e8be336d33..00c1d5c9fecf90 100644 --- a/ngraph/python/src/ngraph/opset1/ops.py +++ b/ngraph/python/src/ngraph/opset1/ops.py @@ -2642,7 +2642,7 @@ def sqrt(node: NodeInput, name: Optional[str] = None) -> Node: def squared_difference( x1: NodeInput, x2: NodeInput, auto_broadcast: str = "NUMPY", name: Optional[str] = None ) -> Node: - """! Perform an element-wise squared difference between two tensors. + r"""! Perform an element-wise squared difference between two tensors. \f[ y[i] = (x_1[i] - x_2[i])^2 \f] @@ -2784,8 +2784,7 @@ def tensor_iterator( concat_output_desc: List[TensorIteratorConcatOutputDesc], name: Optional[str] = None, ) -> Node: - """! - Perform recurrent execution of the network described in the body, iterating through the data. + """! Perform recurrent execution of the network described in the body, iterating through the data. @param inputs: The provided to TensorIterator operator. @param graph_body: The graph representing the body we execute.
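As a closing illustration of the opset factory functions whose docstrings are updated above, the sketch below builds `Elu` and `SquaredDifference` nodes. It assumes the `ngraph` Python package built from this repository is importable; it only constructs the graph nodes and does not run inference.

```python
import numpy as np
import ngraph as ng

# Two 2x2 floating-point parameters to feed the element-wise operations.
x = ng.parameter([2, 2], dtype=np.float32, name="x")
y = ng.parameter([2, 2], dtype=np.float32, name="y")

elu_node = ng.elu(x, alpha=1.0)            # alpha * (exp(x) - 1) if x < 0, x otherwise
sqdiff_node = ng.squared_difference(x, y)  # (x - y)^2, element-wise

print(elu_node, sqdiff_node)
```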