[Relay] Remove DynamicToStatic pass from graph runtime build (#10691)
Closes #10692

To solve this problem, we can either remove this pass from the `relay.build(...)` pipeline or run `DynamicToStatic` in both the VM and non-VM paths. I propose removing it because (1) `DynamicToStatic` is normally meant to be applied right after model import, and (2) the only case where running `DynamicToStatic` during `relay.build(...)` helps is when the input model is entirely static but a frontend fails to produce a static module AND the user forgets to run `DynamicToStatic` after model import.

I hope the latter case is rare, but if it is not, that is something we should fix on the frontend side. We should avoid relying on the `DynamicToStatic` that runs during `relay.build(...)`, since not all use cases of TVM go through `relay.build(...)` (BYOC, for example).
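As context for the recommendation above, here is a minimal sketch of the intended workflow: run `DynamicToStatic` explicitly right after model import and before `relay.build(...)`, rather than relying on the pass this patch removes. The ONNX frontend, model file name, and input shape below are illustrative assumptions, not part of this patch.

    import onnx
    import tvm
    from tvm import relay

    # Hypothetical model file and input shape, used only for illustration.
    onnx_model = onnx.load("model.onnx")
    mod, params = relay.frontend.from_onnx(onnx_model, shape={"input": (1, 3, 224, 224)})

    # If the frontend emitted dynamic ops for a model that is actually static,
    # fold them to their static versions here, at import time.
    mod = relay.transform.DynamicToStatic()(mod)

    # After this change the graph-runtime path no longer runs DynamicToStatic
    # internally, so the module should already be static at this point; genuinely
    # dynamic models should be compiled with the VM (e.g. relay.vm.compile) instead.
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="llvm", params=params)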
masahi authored Mar 23, 2022
1 parent 9f58089 commit 4c608be
Showing 5 changed files with 16 additions and 19 deletions.
python/tvm/relay/frontend/paddlepaddle.py (1 addition, 1 deletion)
@@ -1281,7 +1281,7 @@ def convert_prelu(g, op, block):
shape = _op.strided_slice(shape_of(x), [0], [1])
else:
shape = _op.strided_slice(shape_of(x), [1], [2])
- alpha = _op.broadcast_to(alpha, shape)
+ alpha = _op.broadcast_to(alpha, fold_constant(shape))
out = _op.nn.prelu(x, alpha, axis)
g.add_node(op.output("Out")[0], out)

python/tvm/relay/frontend/pytorch.py (1 addition, 1 deletion)
@@ -641,7 +641,7 @@ def full_impl(self, data, fill_value, dtype):
tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
size = _op.concatenate(tmp, axis=0)

- out = _op.full(_expr.const(fill_value), size, dtype=dtype)
+ out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
if need_reshape:
out = _op.reshape(out, new_shape)
return out
python/tvm/relay/frontend/tflite.py (2 additions, 2 deletions)
@@ -33,7 +33,7 @@
from ..backend.name_transforms import sanitize_name
from .common import ExprTable
from .common import infer_shape as _infer_shape
- from .common import to_int_list
+ from .common import to_int_list, shape_of
from .tflite_flexbuffer import FlexBufferDecoder

__all__ = ["from_tflite"]
@@ -846,7 +846,7 @@ def convert_shape(self, op):
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"

- out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
+ out = shape_of(self.get_tensor_expr(input_tensors[0]))

return out

src/relay/backend/utils.cc (1 addition, 11 deletions)
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
// eta expand to support constructors in argument position
pass_seqs.push_back(transform::EtaExpand(
/* expand_constructor */ true, /* expand_global_var */ false));
- } else {
-   // DynamicToStatic runs FoldConstant, which affects SimplifyExpr below.
-   // Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different
-   // inputs from the ones when invoked via relay.build(...).
-   // This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on
-   // the structual hash of the input relay module (e.g. MetaScheduler).
-   // TODO(masahi): Either remove DynamicToStatic below or always run it
-
-   // Convert Dynamic ops to static versions
-   pass_seqs.push_back(transform::DynamicToStatic());
}

PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) {
@@ -252,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
*rv = false;
});
pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip));
- pass_seqs.push_back(transform::SimplifyExpr());
pass_seqs.push_back(transform::CombineParallelConv2D(3));
pass_seqs.push_back(transform::CombineParallelDense(3));
pass_seqs.push_back(transform::CombineParallelBatchMatmul(3));
pass_seqs.push_back(transform::FoldConstant());
pass_seqs.push_back(transform::FoldScaleAxis());
+ pass_seqs.push_back(transform::SimplifyExpr());
pass_seqs.push_back(transform::CanonicalizeCast());
pass_seqs.push_back(transform::CanonicalizeOps());

tests/python/frontend/tensorflow/test_forward.py (11 additions, 4 deletions)
@@ -147,6 +147,7 @@ def run_tvm_graph(
outputs=out_names,
convert_config=convert_config,
)

dev = tvm.device(target, 0)
if mode == "debug":
inputs = []
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))

+ # Output shape depends on a dynamic input, use VM.
if default_value == None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
- [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
+ [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
+ mode="vm",
)


@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None

result = tf.sparse.to_dense(A_sp, default_value=default_value)

- compare_tf_with_tvm([], [], result.name)
+ # The output shape depends on a dynamic input, use VM.
+ compare_tf_with_tvm([], [], result.name, mode="vm")


def test_forward_sparse_to_dense_v2():
@@ -5572,7 +5576,7 @@ def _test_unique(n, dtype, is_dyn):
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
- compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
+ compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")


def test_forward_unique():
@@ -5607,7 +5611,10 @@ def _test_unique_with_counts(n, dtype, is_dyn):
)
else:
compare_tf_with_tvm(
- None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
+ np_data,
+ "",
+ ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
+ mode="vm",
)


