Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[TFLite][Frontend] Fix test failures caused by div-by-zero #15844

Merged
merged 12 commits into from
Oct 10, 2023
Merged
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 14 additions & 6 deletions tests/python/frontend/tflite/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -2452,6 +2452,7 @@ def _test_elemwise(
qnn_op=None,
same_qnn_params=False,
comparison_op=False,
exclude_zero_point=False,
):
"""One iteration of elemwise"""

Expand Down Expand Up @@ -2480,6 +2481,16 @@ def __test_elemwise(in_data):
inq0_min, inq0_max = (out_min, out_max)
inq1_min, inq1_max = (out_min, out_max)

if exclude_zero_point:
if inq1_max == inq1_min:
raise ZeroDivisionError("Input range is 0.")

# only compute for rhs.
quant_scale = 255 / (inq1_max - inq1_min)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't there be a dependency on the data type for the value 255, since the data type can be int8 or int16?

zero_point = int(round(-inq1_min * quant_scale))
data[1][data[1] == zero_point] += 1
data[1][data[1] == 0] += 1
Lunderberg marked this conversation as resolved.
Show resolved Hide resolved

# fake_quant will keep the tensors in float32 until the conversion in the session
inq_data = [
tf.quantization.fake_quant_with_min_max_args(
Expand Down Expand Up @@ -2619,6 +2630,7 @@ def _test_div(data, fused_activation_function=None, quantized=False, qnn_op=None
quantized,
qnn_op,
same_qnn_params=True,
exclude_zero_point=True,
)


Expand Down Expand Up @@ -2795,6 +2807,7 @@ def _test_floor_divide(data, fused_activation_function=None, quantized=False, qn
quantized,
qnn_op,
same_qnn_params=True,
exclude_zero_point=True,
)


Expand Down Expand Up @@ -2874,7 +2887,7 @@ def _test_elemwise_qnn_out_range(qnn_op):


def test_all_elemwise():
"""All_elewise"""
"""All_elemwise"""
_test_forward_elemwise(_test_add)
_test_forward_elemwise_quantized(_test_add)
_test_forward_elemwise(partial(_test_add, fused_activation_function="RELU"))
Expand Down Expand Up @@ -2916,11 +2929,6 @@ def test_all_elemwise():
_test_forward_elemwise(_test_floor_divide)
_test_forward_elemwise_quantized(_test_floor_divide)
_test_forward_elemwise(_test_floor_mod)
# This test of quantized floor mod is currently disabled due
p3achyjr marked this conversation as resolved.
Show resolved Hide resolved
# to flaky CI failures in main, failing approximately 45% of
# the time.
#
# _test_forward_elemwise_quantized(_test_floor_mod)


#######################################################################
Expand Down