[TFLite] Reshape - support different qnn params for input and output (a…
anijain2305 authored and trevor-m committed Jan 21, 2021
1 parent 8327757 commit 046a739
Showing 2 changed files with 130 additions and 31 deletions.
30 changes: 29 additions & 1 deletion python/tvm/relay/frontend/tflite.py
@@ -511,13 +511,30 @@ def convert_reshape(self, op):
        in_expr = self.get_expr(input_tensor_idx)

        # If the tensors are quantized, ensure that input/output qnn params are same.
        input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
        if input_tensor.qnn_params and input_tensor_type_str == "int8":
            # TFLite 2.x quantization spec requires qnn params to be same and dtype to be int8.
            # For TFLite 1.x, dtype can be uint8 and qnn params can be different
            output_tensor = output_tensors[0]
            assert self.has_same_qnn_params(
                input_tensor, output_tensor
            ), "TFLite reshape requires input and output scale and zero points to be equal"

        out = _op.reshape(in_expr, newshape=target_shape)
        if input_tensor.qnn_params and input_tensor_type_str == "uint8":
            output_tensor = output_tensors[0]
            if not self.has_same_qnn_params(input_tensor, output_tensor):
                output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
                out = _qnn.op.requantize(
                    out,
                    input_scale=input_tensor.qnn_params["scale"],
                    input_zero_point=input_tensor.qnn_params["zero_point"],
                    output_scale=output_tensor.qnn_params["scale"],
                    output_zero_point=output_tensor.qnn_params["zero_point"],
                    out_dtype=output_tensor_type_str,
                )

        return out

    def _convert_resize(self, method, op):
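For context, the new uint8 path re-expresses the already-reshaped tensor under the output tensor's quantization parameters instead of asserting that they match. A minimal NumPy sketch of the affine requantization step (TVM's qnn.requantize additionally handles rounding modes and fixed-point arithmetic, so this is illustrative only):

    import numpy as np

    def requantize_ref(q_in, in_scale, in_zp, out_scale, out_zp):
        # Dequantize to real values, then quantize under the output params.
        real = in_scale * (q_in.astype(np.int32) - in_zp)
        q_out = np.round(real / out_scale) + out_zp
        # Saturate to the uint8 range used by the TFLite 1.x path.
        return np.clip(q_out, 0, 255).astype(np.uint8)

    # Scale 0.5 / zero point 10 re-expressed as scale 0.25 / zero point 0:
    # the stored integers change while the represented values do not.
    print(requantize_ref(np.array([10, 12, 30]), 0.5, 10, 0.25, 0))  # [ 0  4 40]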
@@ -2527,6 +2544,17 @@ def convert_pack(self, op):
        output_tensors = self.get_output_tensors(op)
        assert len(output_tensors) == 1, "output tensors length should be 1"

        if input_tensors[0].qnn_params:
            output_tensor = output_tensors[0]
            assert self.has_same_qnn_params(
                input_tensors[0], output_tensor
            ), "TFLite pack requires input and output scale and zero points to be equal"

            for input_tensor in input_tensors:
                assert self.has_same_qnn_params(
                    input_tensors[0], input_tensor
                ), "TFLite pack requires all input tensors to have same scale and zero point"

        assert op.BuiltinOptionsType() == BuiltinOptions.PackOptions
        op_options = op.BuiltinOptions()
        pack_options = PackOptions()
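Pack, unlike the new reshape path, is still converted without a requantize, so the converter insists that every input and the output share quantization parameters. Illustratively (the frontend's has_same_qnn_params performs the equivalent comparison on the stored relay qnn params; the dict layout below is an assumption, not TVM's API):

    def same_qnn_params(a, b):
        # Quantized tensors are interchangeable bit-for-bit only when both
        # the scale and the zero point agree.
        return a["scale"] == b["scale"] and a["zero_point"] == b["zero_point"]

    assert same_qnn_params({"scale": 0.5, "zero_point": 10},
                           {"scale": 0.5, "zero_point": 10})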
131 changes: 101 additions & 30 deletions tests/python/frontend/tflite/test_forward.py
@@ -1251,30 +1251,61 @@ def test_forward_transpose_conv():
# -------


def _test_reshape(data, out_shape, wrap_shape, quantized=False):
    """ One iteration of reshape operation with given data and out shape """
    if quantized:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )

            input_range = {"inq_0": (-100, 100)}
            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)

            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(inq_data, in_shape)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["inq_0:0", "Newshape:0"] if wrap_shape else ["inq_0:0"],
                [inq_data, in_shape] if wrap_shape else [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                mode="vm",
            )
    else:
        # Test with tensor and constant
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)

            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)

            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(in_data, in_shape)

            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["Placeholder:0", "Newshape:0"] if wrap_shape else ["Placeholder:0"],
                [in_data, in_shape] if wrap_shape else [in_data],
                [out],
                mode="vm",
            )


def test_forward_reshape():
@@ -1284,6 +1315,9 @@ def test_forward_reshape():
        _test_reshape(np.arange(6), [3, -1], wrap)
        _test_reshape(np.arange(6), [-1], wrap)

    _test_reshape(np.arange(6, dtype=np.uint8), [2, 3], False, True)
    _test_reshape(np.arange(6, dtype=np.uint8), [-1, 2], False, True)


#######################################################################
# Resize
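The quantized reshape test deliberately fake-quantizes the input over (-100, 100) and the output over (-200, 200), which yields different scales and so exercises the new requantize path. A sketch of the uint8 params implied by a fake-quant range, using the standard TFLite asymmetric scheme (helper name and rounding details are assumptions, not TVM API):

    def uint8_qnn_params(fmin, fmax, qmin=0, qmax=255):
        scale = (fmax - fmin) / (qmax - qmin)
        zero_point = int(round(qmin - fmin / scale))
        return scale, zero_point

    print(uint8_qnn_params(-100, 100))  # input:  scale ~0.784, zero point ~128
    print(uint8_qnn_params(-200, 200))  # output: scale ~1.569, zero point ~128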
@@ -2750,25 +2784,51 @@ def test_forward_one_hot():
# ----


def _test_pack(data, is_var, axis, quantized=False):
    """ One iteration of pack """

    assert len(data) >= 1
    assert len(data) == len(is_var)
    if quantized:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype="float32", name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype="float32", name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    i_data, min=-100, max=100, name="inq_{}".format(idx)
                )
                for idx, i_data in enumerate(in_data)
            ]
            input_range = {}
            for i in range(len(data)):
                input_range["inq_{}".format(i)] = (-100, 100)

            out = array_ops.pack(inq_data, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            name = ["inq_{}:0".format(idx) for idx in range(len(data))]
            compare_tflite_with_tvm(
                data, name, inq_data, [out], quantized=True, input_range=input_range
            )
    else:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype=d.dtype, name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype=d.dtype, name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]

            out = array_ops.pack(in_data, axis=axis)
            name = [_.name for _ in in_data]
            compare_tflite_with_tvm(data, name, in_data, [out], experimental_new_converter=True)


def test_forward_pack():
@@ -2791,6 +2851,17 @@ def test_forward_pack():
            1,
        )

    _test_pack(
        [
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
        ],
        [True, True, True],
        1,
        quantized=True,
    )
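The updated tests can also be exercised directly; a minimal driver, assuming TVM's usual TFLite test prerequisites (TensorFlow installed and this file's helpers in scope):

    if __name__ == "__main__":
        test_forward_reshape()
        test_forward_pack()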


#######################################################################
# Unpack
