
[Relay] Remove DynamicToStatic pass from graph runtime build #10691

Merged: 8 commits, Mar 23, 2022
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/paddlepaddle.py
@@ -1281,7 +1281,7 @@ def convert_prelu(g, op, block):
shape = _op.strided_slice(shape_of(x), [0], [1])
else:
shape = _op.strided_slice(shape_of(x), [1], [2])
- alpha = _op.broadcast_to(alpha, shape)
+ alpha = _op.broadcast_to(alpha, fold_constant(shape))
out = _op.nn.prelu(x, alpha, axis)
g.add_node(op.output("Out")[0], out)

2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/pytorch.py
@@ -641,7 +641,7 @@ def full_impl(self, data, fill_value, dtype):
tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
size = _op.concatenate(tmp, axis=0)

- out = _op.full(_expr.const(fill_value), size, dtype=dtype)
+ out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
if need_reshape:
out = _op.reshape(out, new_shape)
return out
4 changes: 2 additions & 2 deletions python/tvm/relay/frontend/tflite.py
@@ -33,7 +33,7 @@
from ..backend.name_transforms import sanitize_name
from .common import ExprTable
from .common import infer_shape as _infer_shape
- from .common import to_int_list
+ from .common import to_int_list, shape_of
from .tflite_flexbuffer import FlexBufferDecoder

__all__ = ["from_tflite"]
@@ -846,7 +846,7 @@ def convert_shape(self, op):
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"

- out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
+ out = shape_of(self.get_tensor_expr(input_tensors[0]))
Member Author:
Before this PR, TFLite MobileNet was returning a dynamic-shape output 🤦‍♂️


return out

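The change above swaps the raw _op.shape_of for the frontend shape_of helper imported in the earlier hunk. As a rough illustration of the difference, here is a minimal sketch of such a helper, written from memory rather than copied from tvm/relay/frontend/common.py; the name static_shape_of and the int32 default dtype are assumptions. When the input type is fully static, the shape is returned as a Relay constant, so downstream consumers stay static instead of depending on a runtime shape tensor.

import tvm
from tvm import relay
from tvm.relay.frontend.common import infer_type


def static_shape_of(x, dtype="int32"):
    """Return the shape of x as a constant when it is statically known."""
    ttype = infer_type(x).checked_type
    if all(isinstance(dim, tvm.tir.IntImm) for dim in ttype.shape):
        # Fully static shape: emit a constant so later passes see a static shape.
        return relay.const([int(dim) for dim in ttype.shape], dtype)
    # Genuinely dynamic input: compute the shape at run time.
    return relay.shape_of(x, dtype)


x = relay.var("x", shape=(1, 224, 224, 3), dtype="float32")
shape = static_shape_of(x)  # a relay Constant holding [1, 224, 224, 3]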
12 changes: 1 addition & 11 deletions src/relay/backend/utils.cc
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
// eta expand to support constructors in argument position
pass_seqs.push_back(transform::EtaExpand(
/* expand_constructor */ true, /* expand_global_var */ false));
- } else {
- // DynamicToStatic runs FoldConstant, which affects SimplifyExpr below.
- // Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different
- // inputs from the ones when invoked via relay.build(...).
- // This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on
- // the structural hash of the input relay module (e.g. MetaScheduler).
- // TODO(masahi): Either remove DynamicToStatic below or always run it
-
- // Convert Dynamic ops to static versions
- pass_seqs.push_back(transform::DynamicToStatic());
}

PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) {
@@ -252,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
*rv = false;
});
pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip));
- pass_seqs.push_back(transform::SimplifyExpr());
pass_seqs.push_back(transform::CombineParallelConv2D(3));
pass_seqs.push_back(transform::CombineParallelDense(3));
pass_seqs.push_back(transform::CombineParallelBatchMatmul(3));
pass_seqs.push_back(transform::FoldConstant());
pass_seqs.push_back(transform::FoldScaleAxis());
+ pass_seqs.push_back(transform::SimplifyExpr());
Member Author:
I realized that SimplifyExpr effectively depends on FoldConstant being applied beforehand, so I swapped the order.

pass_seqs.push_back(transform::CanonicalizeCast());
pass_seqs.push_back(transform::CanonicalizeOps());

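For reference, the reordered prefix can be sketched with the Python pass API; this is an abbreviated, illustrative rendering of GetPassPrefix after this PR (only the passes around the swap are listed), not the C++ code itself.

import tvm
from tvm import relay

# Non-VM pass prefix after this PR: DynamicToStatic is gone and SimplifyExpr
# now runs after FoldConstant, so it sees constant-folded inputs.
pass_prefix = tvm.transform.Sequential(
    [
        relay.transform.EliminateCommonSubexpr(),
        relay.transform.CombineParallelConv2D(3),
        relay.transform.CombineParallelDense(3),
        relay.transform.CombineParallelBatchMatmul(3),
        relay.transform.FoldConstant(),
        relay.transform.FoldScaleAxis(),
        relay.transform.SimplifyExpr(),  # moved below FoldConstant
        relay.transform.CanonicalizeCast(),
        relay.transform.CanonicalizeOps(),
    ]
)

# Typical usage on an IRModule `mod`:
# with tvm.transform.PassContext(opt_level=3):
#     mod = pass_prefix(mod)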
15 changes: 11 additions & 4 deletions tests/python/frontend/tensorflow/test_forward.py
@@ -147,6 +147,7 @@ def run_tvm_graph(
outputs=out_names,
convert_config=convert_config,
)

dev = tvm.device(target, 0)
if mode == "debug":
inputs = []
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))

+ # Output shape depends on a dynamic input, use VM.
if default_value == None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
- [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
+ [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
Member Author:
This test was previously passing on the graph runtime only because, in the TF tests, the dynamic input is bound to a constant tensor before relay.build(...). Such usage of dynamic inputs makes no sense, so I changed the test to use the VM.

)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
+ mode="vm",
)
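The sparse_to_dense cases above now run with mode="vm". A minimal standalone sketch of why, using a hypothetical module rather than the TF test itself: when the output shape is carried by a runtime tensor, the graph executor has nothing static to allocate, while the VM resolves the shape during execution.

import numpy as np
import tvm
from tvm import relay

out_shape = relay.var("out_shape", shape=(2,), dtype="int64")
result = relay.zeros(out_shape, dtype="float32")  # output shape known only at run time
mod = tvm.IRModule.from_expr(relay.Function([out_shape], result))

# relay.build(...) for the graph executor would need a static output shape here;
# the VM allocates the result once the shape value arrives.
vm = relay.create_executor("vm", mod=mod, target="llvm")
print(vm.evaluate()(np.array([2, 3], dtype="int64")).shape)  # (2, 3)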


@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):

result = tf.sparse.to_dense(A_sp, default_value=default_value)

- compare_tf_with_tvm([], [], result.name)
+ # The output shape depends on a dynamic input, use VM.
+ compare_tf_with_tvm([], [], result.name, mode="vm")


def test_forward_sparse_to_dense_v2():
@@ -5572,7 +5576,7 @@ def _test_unique(n, dtype, is_dyn):
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
- compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
+ compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")


def test_forward_unique():
@@ -5607,7 +5611,10 @@ def _test_unique_with_counts(n, dtype, is_dyn):
)
else:
compare_tf_with_tvm(
- None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
+ np_data,
+ "",
+ ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
+ mode="vm",
Member Author:
Unique is inherently a dynamic op, but for some reason this test was running on the graph runtime, and it happened to work only because the dynamic input is bound to a constant tensor before relay.build(...). The test was effectively running unique(const_tensor), which is not really useful.

)
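A short standalone sketch of why unique ends up dynamic; it mirrors, but is not copied from, the TF frontend's lowering, and the shapes and values are illustrative. relay.unique returns padded outputs plus num_unique, and trimming the padded output to num_unique elements is what makes the final shape data-dependent, hence mode="vm".

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(6,), dtype="int32")
unique, indices, inverse_indices, num_unique = relay.unique(x, is_sorted=True)
# Trim the padded `unique` output to the runtime num_unique -> data-dependent shape.
trimmed = relay.strided_slice(unique, begin=[0], end=num_unique, slice_mode="size")
mod = tvm.IRModule.from_expr(relay.Function([x], trimmed))

vm = relay.create_executor("vm", mod=mod, target="llvm")
print(vm.evaluate()(np.array([3, 1, 2, 3, 1, 2], dtype="int32")))  # [1 2 3]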

