Skip to content

Commit

Permalink
[TF FE]: Support complex tensors for Prod operations (openvinotoolkit…
Browse files Browse the repository at this point in the history
…#26475)

### Details:
- Fixed fifth case of `atan2` implementation (it returned `pi/2` instead
of `-pi/2`).
- Moved `atan2` to `utils`.
- Created helper functions for converting complex numbers from rectangular
to polar form and the other way around.
- Supported complex tensors for `Prod` operations + unit tests.

### Tickets:
 - [None](openvinotoolkit#23233)


### Resources used:
-
https://math.stackexchange.com/questions/1938894/imaginary-part-of-a-product-of-n-complex-numbers
 - https://en.m.wikipedia.org/wiki/Euler%27s_formula

---------

Co-authored-by: Roman Kazantsev <[email protected]>
  • Loading branch information
hub-bla and rkazants authored Sep 9, 2024
1 parent 94a9675 commit a8b4e91
Show file tree
Hide file tree
Showing 7 changed files with 198 additions and 36 deletions.
3 changes: 1 addition & 2 deletions src/frontends/tensorflow/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@
#include "openvino/op/reduce_max.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reduce_min.hpp"
#include "openvino/op/reduce_prod.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/sigmoid.hpp"
Expand Down Expand Up @@ -202,7 +201,6 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Max", CreatorFunction(translate_direct_reduce_op<v1::ReduceMax>)},
{"Mean", CreatorFunction(translate_direct_reduce_op<v1::ReduceMean>)},
{"Min", CreatorFunction(translate_direct_reduce_op<v1::ReduceMin>)},
{"Prod", CreatorFunction(translate_direct_reduce_op<v1::ReduceProd>)},
{"Sum", CreatorFunction(translate_direct_reduce_op<v1::ReduceSum>)},

// Separate translators:
Expand Down Expand Up @@ -345,6 +343,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Placeholder", CreatorFunction(translate_placeholder_linked_op)},
{"PlaceholderWithDefault", CreatorFunction(translate_placeholder_with_default_op)},
{"PreventGradient", CreatorFunction(translate_identity_op)},
{"Prod", CreatorFunction(translate_prod_op)},
{"Range", CreatorFunction(translate_range_op)},
{"Rank", CreatorFunction(translate_rank_op)},
{"RandomUniform", CreatorFunction(translate_random_uniform_op)},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,7 @@ OP_CONVERTER(translate_ones_like_op);
OP_CONVERTER(translate_pack_op);
OP_CONVERTER(translate_pad_op);
OP_CONVERTER(translate_padv2_op);
OP_CONVERTER(translate_prod_op);
OP_CONVERTER(translate_range_op);
OP_CONVERTER(translate_rank_op);
OP_CONVERTER(translate_random_uniform_op);
Expand Down
10 changes: 10 additions & 0 deletions src/frontends/tensorflow_common/include/utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,16 @@ ov::Output<ov::Node> create_dense_tensor(const ov::Output<ov::Node>& indices,
const ov::Output<ov::Node>& shape,
const ov::Output<ov::Node>& values);

// Builds an elementwise atan2(y, x) subgraph (follows std::atan2 branch conventions).
ov::Output<ov::Node> atan2_op(const ov::Output<ov::Node>& y, const ov::Output<ov::Node>& x);

// Converts a complex number from rectangular form (a + b*j) to polar form.
// Returns the pair {r, theta} with r = sqrt(a^2 + b^2) and theta = atan2(b, a).
std::pair<ov::Output<ov::Node>, ov::Output<ov::Node>> complex_rectangular_to_polar(
    const ov::Output<ov::Node>& real_part,
    const ov::Output<ov::Node>& imag_part);

// Converts a complex number from polar form (r, theta) back to rectangular form.
// Returns the pair {real_part, imag_part} with real = r*cos(theta), imag = r*sin(theta).
std::pair<ov::Output<ov::Node>, ov::Output<ov::Node>> complex_polar_to_rectangular(
    const ov::Output<ov::Node>& r,
    const ov::Output<ov::Node>& theta);

} // namespace tensorflow
} // namespace frontend
} // namespace ov
34 changes: 1 addition & 33 deletions src/frontends/tensorflow_common/src/op/atan2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,39 +29,7 @@ OutputVector translate_atan2_op(const NodeContext& node) {
auto y = node.get_input(0);
auto x = node.get_input(1);

// handle the first condition : x>0
auto div_y_x = make_shared<v1::Divide>(y, x);
auto atan = make_shared<v0::Atan>(div_y_x);
auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
auto result = atan->output(0);

// handle the second condition : x<0 && y>=0
auto const_pi = create_same_type_const_scalar<double>(x, std::atan(1.0) * 4);
auto is_x_negative = make_shared<v1::Less>(x, const_zero);
auto y_non_negative = make_shared<v1::GreaterEqual>(y, const_zero);
auto cond1 = make_shared<v1::LogicalAnd>(is_x_negative, y_non_negative);
auto atan_y_x_plus_pi = make_shared<v1::Add>(atan, const_pi);
result = make_shared<v1::Select>(cond1, atan_y_x_plus_pi, result);

// handle the third condition : x<0 && y<0
auto is_y_negative = make_shared<v1::Less>(y, const_zero);
auto cond2 = make_shared<v1::LogicalAnd>(is_x_negative, is_y_negative);
auto atan_y_x_minus_pi = make_shared<v1::Subtract>(atan, const_pi);
result = make_shared<v1::Select>(cond2, atan_y_x_minus_pi, result);

// handle the fourth condition : x=0 && y>0
auto is_x_zero = make_shared<v1::Equal>(x, const_zero);
auto is_y_positive = make_shared<v1::Greater>(y, const_zero);
auto cond3 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_positive);
auto const_two = create_same_type_const_scalar<int32_t>(x, 2);
auto pi_div_two = make_shared<v1::Divide>(const_pi, const_two);
result = make_shared<v1::Select>(cond3, pi_div_two, result);

// handle the fifth condition : x=0 && y<0
auto cond4 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_negative);
auto const_minus_two = create_same_type_const_scalar<int32_t>(x, -2);
auto pi_div_minus_two = make_shared<v1::Divide>(const_pi, const_minus_two);
result = make_shared<v1::Select>(cond4, pi_div_two, result);
auto result = atan2_op(y, x);

set_node_name(node.get_name(), result.get_node_shared_ptr());
return {result};
Expand Down
63 changes: 62 additions & 1 deletion src/frontends/tensorflow_common/src/op/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@
//

#include "common_op_table.hpp"
#include "helper_ops/complex_type_mark.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/reduce_l2.hpp"
#include "openvino/op/reduce_logical_and.hpp"
#include "openvino/op/reduce_logical_or.hpp"
Expand All @@ -11,6 +16,9 @@
#include "openvino/op/reduce_min.hpp"
#include "openvino/op/reduce_prod.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/select.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "utils.hpp"

using namespace std;
using namespace ov::op;
Expand All @@ -30,7 +38,6 @@ OutputVector translate_direct_reduce_op(const NodeContext& node) {
"Max",
"Mean",
"Min",
"Prod",
"Sum",
"MEAN",
"REDUCE_ALL",
Expand All @@ -47,6 +54,60 @@ OutputVector translate_direct_reduce_op(const NodeContext& node) {
return {reduce_op};
}

OutputVector translate_prod_op(const NodeContext& node) {
    // Translates TF Prod: reduce-product of `input` over the axes given by `axis`,
    // optionally keeping the reduced dimensions. Complex tensors get a dedicated lowering.
    default_op_checks(node, 2, {"Prod"}, true);
    auto input = node.get_input(0);
    auto axis = node.get_input(1);
    auto keep_dims = node.get_attribute<bool>("keep_dims", false);

    auto complex_type_mark = as_type_ptr<ComplexTypeMark>(input.get_node_shared_ptr());
    if (!complex_type_mark) {
        // Real-valued case: a plain ReduceProd is sufficient.
        auto prod_result = make_shared<v1::ReduceProd>(input, axis, keep_dims);
        set_node_name(node.get_name(), prod_result);
        return {prod_result};
    }

    // Complex case: the marked tensor packs real/imag parts into a trailing
    // dimension of size 2 ([..., 0] = real, [..., 1] = imag).
    element::Type complex_part_type = complex_type_mark->get_complex_part_type();
    input = complex_type_mark->input_value(0);

    auto idx_real = make_shared<v0::Constant>(element::i64, Shape{}, 0);
    auto idx_imag = make_shared<v0::Constant>(element::i64, Shape{}, 1);
    auto last_axis = make_shared<v0::Constant>(element::i32, Shape{1}, -1);

    auto real_part = make_shared<v8::Gather>(input, idx_real, last_axis);
    auto imag_part = make_shared<v8::Gather>(input, idx_imag, last_axis);

    // Detect exact zeros: their polar angle would otherwise come out as NaN.
    auto zero = create_same_type_const_scalar<float>(real_part, 0.0f);
    auto real_is_zero = make_shared<v1::Equal>(real_part, zero);
    auto imag_is_zero = make_shared<v1::Equal>(imag_part, zero);
    auto number_is_zero = make_shared<v1::LogicalAnd>(real_is_zero, imag_is_zero);

    Output<Node> magnitude, angle;
    std::tie(magnitude, angle) = complex_rectangular_to_polar(real_part, imag_part);

    // Force the angle of 0+0j to 0 so the product formula below stays well-defined.
    angle = make_shared<v1::Select>(number_is_zero, zero, angle);

    // prod(z_i) = (r_0 * r_1 * ... * r_n) * e^(j * (theta_0 + theta_1 + ... + theta_n))
    auto angle_sum = make_shared<v1::ReduceSum>(angle, axis, keep_dims);
    auto magnitude_prod = make_shared<v1::ReduceProd>(magnitude, axis, keep_dims);

    Output<Node> result_real, result_imag;
    std::tie(result_real, result_imag) = complex_polar_to_rectangular(magnitude_prod, angle_sum);

    // Re-pack real/imag into a trailing dimension of size 2.
    auto real_unsq = make_shared<v0::Unsqueeze>(result_real, last_axis);
    auto imag_unsq = make_shared<v0::Unsqueeze>(result_imag, last_axis);
    auto packed = make_shared<v0::Concat>(OutputVector{real_unsq, imag_unsq}, -1);
    set_node_name(node.get_name(), packed);

    return {make_shared<ComplexTypeMark>(packed, complex_part_type)};
}

template OutputVector translate_direct_reduce_op<v1::ReduceLogicalOr>(const NodeContext& node);
template OutputVector translate_direct_reduce_op<v1::ReduceLogicalAnd>(const NodeContext& node);
template OutputVector translate_direct_reduce_op<v1::ReduceMax>(const NodeContext& node);
Expand Down
72 changes: 72 additions & 0 deletions src/frontends/tensorflow_common/src/utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,31 +10,38 @@
#include "helper_ops/complex_type_mark.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/atan.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/cos.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/floor.hpp"
#include "openvino/op/floor_mod.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/greater.hpp"
#include "openvino/op/greater_eq.hpp"
#include "openvino/op/group_conv.hpp"
#include "openvino/op/less.hpp"
#include "openvino/op/logical_and.hpp"
#include "openvino/op/maximum.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/pad.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/power.hpp"
#include "openvino/op/reduce_max.hpp"
#include "openvino/op/reduce_min.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/scatter_nd_update.hpp"
#include "openvino/op/select.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/sin.hpp"
#include "openvino/op/slice.hpp"
#include "openvino/op/split.hpp"
#include "openvino/op/sqrt.hpp"
#include "openvino/op/squeeze.hpp"
#include "openvino/op/subtract.hpp"
#include "openvino/op/transpose.hpp"
Expand Down Expand Up @@ -572,6 +579,71 @@ ov::Output<ov::Node> create_dense_tensor(const ov::Output<ov::Node>& indices,
return dense_tensor;
}

ov::Output<ov::Node> atan2_op(const ov::Output<ov::Node>& y, const ov::Output<ov::Node>& x) {
    // Builds an elementwise atan2(y, x) subgraph following std::atan2 branch
    // conventions for finite inputs:
    //   x > 0          : atan(y/x)
    //   x < 0, y >= 0  : atan(y/x) + pi
    //   x < 0, y < 0   : atan(y/x) - pi
    //   x = 0, y > 0   : pi/2
    //   x = 0, y < 0   : -pi/2
    //   x = 0, y = 0   : 0   (atan(0/0) is NaN, so this case is patched explicitly)

    // handle the first condition : x>0
    auto div_y_x = std::make_shared<v1::Divide>(y, x);
    auto atan = std::make_shared<v0::Atan>(div_y_x);
    auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
    auto result = atan->output(0);

    // handle the second condition : x<0 && y>=0
    auto const_pi = create_same_type_const_scalar<double>(x, std::atan(1.0) * 4);
    auto is_x_negative = std::make_shared<v1::Less>(x, const_zero);
    auto y_non_negative = std::make_shared<v1::GreaterEqual>(y, const_zero);
    auto cond1 = std::make_shared<v1::LogicalAnd>(is_x_negative, y_non_negative);
    auto atan_y_x_plus_pi = std::make_shared<v1::Add>(atan, const_pi);
    result = std::make_shared<v1::Select>(cond1, atan_y_x_plus_pi, result);

    // handle the third condition : x<0 && y<0
    auto is_y_negative = std::make_shared<v1::Less>(y, const_zero);
    auto cond2 = std::make_shared<v1::LogicalAnd>(is_x_negative, is_y_negative);
    auto atan_y_x_minus_pi = std::make_shared<v1::Subtract>(atan, const_pi);
    result = std::make_shared<v1::Select>(cond2, atan_y_x_minus_pi, result);

    // handle the fourth condition : x=0 && y>0
    auto is_x_zero = std::make_shared<v1::Equal>(x, const_zero);
    auto is_y_positive = std::make_shared<v1::Greater>(y, const_zero);
    auto cond3 = std::make_shared<v1::LogicalAnd>(is_x_zero, is_y_positive);
    auto const_two = create_same_type_const_scalar<int32_t>(x, 2);
    auto pi_div_two = std::make_shared<v1::Divide>(const_pi, const_two);
    result = std::make_shared<v1::Select>(cond3, pi_div_two, result);

    // handle the fifth condition : x=0 && y<0
    auto cond4 = std::make_shared<v1::LogicalAnd>(is_x_zero, is_y_negative);
    auto const_minus_two = create_same_type_const_scalar<int32_t>(x, -2);
    auto pi_div_minus_two = std::make_shared<v1::Divide>(const_pi, const_minus_two);
    result = std::make_shared<v1::Select>(cond4, pi_div_minus_two, result);

    // handle the sixth condition : x=0 && y=0
    // atan(0/0) would propagate NaN; define atan2(0, 0) = 0 as std::atan2 does
    auto is_y_zero = std::make_shared<v1::Equal>(y, const_zero);
    auto cond5 = std::make_shared<v1::LogicalAnd>(is_x_zero, is_y_zero);
    result = std::make_shared<v1::Select>(cond5, const_zero, result);

    return result;
}

std::pair<ov::Output<ov::Node>, ov::Output<ov::Node>> complex_rectangular_to_polar(
    const ov::Output<ov::Node>& real_part,
    const ov::Output<ov::Node>& imag_part) {
    // r = sqrt(a^2 + b^2)
    // use Multiply(x, x) instead of Power(x, 2): a plain elementwise multiply is
    // cheaper than a generic power op in the resulting inference graph
    auto real_sq = std::make_shared<v1::Multiply>(real_part, real_part);
    auto imag_sq = std::make_shared<v1::Multiply>(imag_part, imag_part);
    auto sum_sq = std::make_shared<v1::Add>(real_sq, imag_sq);
    auto r = std::make_shared<v0::Sqrt>(sum_sq);

    // theta = atan2(b, a)
    auto theta = atan2_op(imag_part, real_part);

    return std::make_pair(r, theta);
}

std::pair<ov::Output<ov::Node>, ov::Output<ov::Node>> complex_polar_to_rectangular(const ov::Output<ov::Node>& r,
                                                                                   const ov::Output<ov::Node>& theta) {
    // z = r * (cos(theta) + sin(theta)*j), i.e. real = r*cos(theta), imag = r*sin(theta)
    auto cos_theta = std::make_shared<v0::Cos>(theta);
    auto sin_theta = std::make_shared<v0::Sin>(theta);

    auto real_part = std::make_shared<v1::Multiply>(r, cos_theta);
    auto imag_part = std::make_shared<v1::Multiply>(r, sin_theta);

    return {real_part, imag_part};
}

} // namespace tensorflow
} // namespace frontend
} // namespace ov
51 changes: 51 additions & 0 deletions tests/layer_tests/tensorflow_tests/test_tf_ReduceArithmeticOps.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import pytest
from common.tf_layer_test_class import CommonTFLayerTest

rng = np.random.default_rng(475912)

class TestReduceArithmeticOps(CommonTFLayerTest):
def _prepare_input(self, inputs_info):
Expand Down Expand Up @@ -50,3 +51,53 @@ def test_reduce(self, params, operation, keep_dims, ie_device, precision, ir_ver
use_legacy_frontend=use_legacy_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_legacy_frontend=use_legacy_frontend)


class TestComplexProd(CommonTFLayerTest):
    # Validates the TF FE translation of Prod over complex tensors: the complex
    # input is assembled from separate real/imag placeholders via tf.raw_ops.Complex,
    # reduced with tf.raw_ops.Prod, and split back with Real/Imag for comparison.
    def _prepare_input(self, inputs_info):
        # Feed integer-valued float32 data so the reduced product stays exactly
        # representable and comparisons are not dominated by float rounding noise.
        assert 'param_real:0' in inputs_info, "Test error: inputs_info must contain `param_real`"
        assert 'param_imag:0' in inputs_info, "Test error: inputs_info must contain `param_imag`"
        x_shape = inputs_info['param_real:0']
        inputs_data = {}
        inputs_data['param_real:0'] = rng.integers(-10, 10, x_shape).astype(np.float32)
        inputs_data['param_imag:0'] = rng.integers(-10, 10, x_shape).astype(np.float32)

        return inputs_data

    def create_complex_prod_net(self, shape, axis, keep_dims, ir_version, use_legacy_frontend):
        # Builds a TF v1 graph: Complex(real, imag) -> Prod(axis, keep_dims) -> Real/Imag.
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            param_real = tf.compat.v1.placeholder(tf.float32, shape, 'param_real')
            param_imag = tf.compat.v1.placeholder(tf.float32, shape, 'param_imag')

            complex = tf.raw_ops.Complex(real=param_real, imag=param_imag)

            result = tf.raw_ops.Prod(input=complex, axis=axis, keep_dims=keep_dims, name="Prod")

            # Real/Imag make the complex result observable as float outputs.
            tf.raw_ops.Real(input=result)
            tf.raw_ops.Imag(input=result)

            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        return tf_net, None

    # Shapes/axes cover 1-D full reduction, a middle axis, and a negative axis.
    test_data = [
        dict(shape=[2], axis=0),
        dict(shape=[2, 3, 5], axis=1),
        dict(shape=[3, 1, 2, 4], axis=-2),
    ]

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.parametrize("keep_dims", [True, False])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_reduce(self, params, keep_dims, ie_device, precision, ir_version, temp_dir,
                    use_legacy_frontend):
        # Multi-dimensional cases are skipped on GPU due to a known accuracy issue.
        if ie_device == 'GPU' and params['shape'] in [[2, 3, 5], [3, 1, 2, 4]]:
            pytest.skip('GPU plugin accuracy error')
        self._test(*self.create_complex_prod_net(**params, keep_dims=keep_dims, ir_version=ir_version,
                                                 use_legacy_frontend=use_legacy_frontend),
                   ie_device, precision, ir_version, temp_dir=temp_dir,
                   use_legacy_frontend=use_legacy_frontend)

0 comments on commit a8b4e91

Please sign in to comment.