From 63947be0bca1a6ad983774eb610613cc33b4aa5b Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 06:47:54 +0900
Subject: [PATCH 1/8] [Relay] Remove DynamicToStatic pass from graph runtime
 build

---
 src/relay/backend/utils.cc | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/src/relay/backend/utils.cc b/src/relay/backend/utils.cc
index a07e20bf1835..bfef0df261ee 100644
--- a/src/relay/backend/utils.cc
+++ b/src/relay/backend/utils.cc
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     // eta expand to support constructors in argument position
     pass_seqs.push_back(transform::EtaExpand(
         /* expand_constructor */ true, /* expand_global_var */ false));
-  } else {
-    // DynamicToStatic runs FoldConstant, which affects SimplifyExpr below.
-    // Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different
-    // inputs from the ones when invoked via relay.build(...).
-    // This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on
-    // the structual hash of the input relay module (e.g. MetaScheduler).
-    // TODO(masahi): Either remove DynamicToStatic below or always run it
-
-    // Convert Dynamic ops to static versions
-    pass_seqs.push_back(transform::DynamicToStatic());
   }
 
   PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) {

From 670dfa2d05dc6088f22e753d65d24f5c6a808f8b Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 16:31:05 +0900
Subject: [PATCH 2/8] Fix the use of op.shape_of in tflite frontend

---
 python/tvm/relay/frontend/tflite.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index 4e4092b7b387..d430eaccbdc3 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -33,7 +33,7 @@
 from ..backend.name_transforms import sanitize_name
 from .common import ExprTable
 from .common import infer_shape as _infer_shape
-from .common import to_int_list
+from .common import to_int_list, shape_of
 from .tflite_flexbuffer import FlexBufferDecoder
 
 __all__ = ["from_tflite"]
@@ -846,7 +846,7 @@ def convert_shape(self, op):
         input_tensors = self.get_input_tensors(op)
         assert len(input_tensors) == 1, "input tensors length should be 1"
 
-        out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
+        out = shape_of(self.get_tensor_expr(input_tensors[0]))
 
         return out

From 464d686774cb0f66678e86eee205ec724ce9b98f Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 17:04:01 +0900
Subject: [PATCH 3/8] fix dtype mismatch in fill_value

---
 python/tvm/relay/frontend/pytorch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 678dab36a659..37a8e459aadb 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -641,7 +641,7 @@ def full_impl(self, data, fill_value, dtype):
                 tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
         size = _op.concatenate(tmp, axis=0)
 
-        out = _op.full(_expr.const(fill_value), size, dtype=dtype)
+        out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
         if need_reshape:
             out = _op.reshape(out, new_shape)
         return out
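
Note on the fill_value fix in patch 3: without an explicit dtype, _expr.const
infers one from the Python value (float32 for a Python float), which can
disagree with the dtype requested for full(). A minimal sketch of the mismatch
using only the public tvm.relay API; the values and shapes are illustrative,
not taken from the PR:

    import tvm
    from tvm import relay

    # relay.const(1.0) infers float32; pairing it with a float16 full()
    # is the kind of dtype mismatch the patch avoids.
    fill_bad = relay.const(1.0)                    # dtype inferred as float32
    fill_good = relay.const(1.0, dtype="float16")  # matches the requested dtype
    out = relay.full(fill_good, (2, 2), dtype="float16")
    print(tvm.IRModule.from_expr(out))
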
From 2c8df8b2d36858b3b78f382bbf229c327a72ca03 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 17:11:45 +0900
Subject: [PATCH 4/8] add FoldConstant to SimplifyExpr's prerequisite pass

---
 src/relay/transforms/simplify_expr.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/relay/transforms/simplify_expr.cc b/src/relay/transforms/simplify_expr.cc
index 209639dd8f83..6c54eae71c2a 100644
--- a/src/relay/transforms/simplify_expr.cc
+++ b/src/relay/transforms/simplify_expr.cc
@@ -708,7 +708,7 @@ Pass SimplifyExpr() {
       [=](Function f, IRModule m, PassContext pc) {
         return Downcast<Function>(SimplifyExpr(f, m));
       };
-  return CreateFunctionPass(pass_func, 0, "SimplifyExpr", {"InferType"});
+  return CreateFunctionPass(pass_func, 0, "SimplifyExpr", {"InferType", "FoldConstant"});
 }
 
 TVM_REGISTER_GLOBAL("relay._transform.SimplifyExpr").set_body_typed(SimplifyExpr);

From 4d87e89d5bec32948fd56c22fe6d64e499abcb06 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 17:44:19 +0900
Subject: [PATCH 5/8] always use vm for TF unique test since unique is always
 dynamic

---
 tests/python/frontend/tensorflow/test_forward.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index f8e819dc015e..037bd6a5f002 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -5572,7 +5572,7 @@ def _test_unique(n, dtype, is_dyn):
     if is_dyn:
         compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
     else:
-        compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
+        compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")
 
 
 def test_forward_unique():
@@ -5607,7 +5607,10 @@ def _test_unique_with_counts(n, dtype, is_dyn):
         )
     else:
         compare_tf_with_tvm(
-            None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
+            np_data,
+            "",
+            ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
+            mode="vm",
         )
 
 

From 9190456f847da3d4483ea6040af920736b282e19 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 17:49:11 +0900
Subject: [PATCH 6/8] removed the use of dyn broadcast in paddle prelu

---
 python/tvm/relay/frontend/paddlepaddle.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index d09e2c8e8da0..7823682c9cc3 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1281,7 +1281,7 @@ def convert_prelu(g, op, block):
             shape = _op.strided_slice(shape_of(x), [0], [1])
         else:
             shape = _op.strided_slice(shape_of(x), [1], [2])
-        alpha = _op.broadcast_to(alpha, shape)
+        alpha = _op.broadcast_to(alpha, fold_constant(shape))
     out = _op.nn.prelu(x, alpha, axis)
     g.add_node(op.output("Out")[0], out)
 
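
Note on the prelu change in patch 6: relay's broadcast_to emits the dynamic
dyn.broadcast_to op whenever the shape argument is still a relay expression,
so folding the shape to a constant first keeps the op static. A rough sketch
of the idea, assuming the fold_constant helper from tvm.relay.frontend.common
and made-up input shapes:

    from tvm import relay
    from tvm.relay.frontend.common import fold_constant

    x = relay.var("x", shape=(1, 4, 8, 8), dtype="float32")
    alpha = relay.var("alpha", shape=(1,), dtype="float32")
    # shape_of(x) is fully static here, so slicing out the channel dim
    # constant-folds to [4] and broadcast_to stays a static op.
    channels = relay.strided_slice(relay.shape_of(x), [1], [2])
    alpha_b = relay.broadcast_to(alpha, fold_constant(channels))
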
From b15431dd6ae3f681ff373bb0483dbf797b89c5d9 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Mon, 21 Mar 2022 17:56:54 +0900
Subject: [PATCH 7/8] try running simplifyexpr after foldconstant

---
 src/relay/backend/utils.cc            | 2 +-
 src/relay/transforms/simplify_expr.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/relay/backend/utils.cc b/src/relay/backend/utils.cc
index bfef0df261ee..1cc726c59f65 100644
--- a/src/relay/backend/utils.cc
+++ b/src/relay/backend/utils.cc
@@ -242,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     *rv = false;
   });
   pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip));
-  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CombineParallelConv2D(3));
   pass_seqs.push_back(transform::CombineParallelDense(3));
   pass_seqs.push_back(transform::CombineParallelBatchMatmul(3));
   pass_seqs.push_back(transform::FoldConstant());
   pass_seqs.push_back(transform::FoldScaleAxis());
+  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CanonicalizeCast());
   pass_seqs.push_back(transform::CanonicalizeOps());
 
diff --git a/src/relay/transforms/simplify_expr.cc b/src/relay/transforms/simplify_expr.cc
index 6c54eae71c2a..209639dd8f83 100644
--- a/src/relay/transforms/simplify_expr.cc
+++ b/src/relay/transforms/simplify_expr.cc
@@ -708,7 +708,7 @@ Pass SimplifyExpr() {
       [=](Function f, IRModule m, PassContext pc) {
         return Downcast<Function>(SimplifyExpr(f, m));
       };
-  return CreateFunctionPass(pass_func, 0, "SimplifyExpr", {"InferType", "FoldConstant"});
+  return CreateFunctionPass(pass_func, 0, "SimplifyExpr", {"InferType"});
 }
 
 TVM_REGISTER_GLOBAL("relay._transform.SimplifyExpr").set_body_typed(SimplifyExpr);

From adcbf9675fca8a0855641fe2136cf014a8061c92 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Tue, 22 Mar 2022 05:17:00 +0900
Subject: [PATCH 8/8] use vm in TF sparse_to_dense test since it depends on
 dynamic input

---
 tests/python/frontend/tensorflow/test_forward.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index 037bd6a5f002..4988f57c24c4 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -147,6 +147,7 @@ def run_tvm_graph(
         outputs=out_names,
         convert_config=convert_config,
     )
+    dev = tvm.device(target, 0)
 
     if mode == "debug":
         inputs = []
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
     )
     oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
 
+    # Output shape depends on a dynamic input, use VM.
     if default_value == None:
         output = tf.sparse_to_dense(indices, oshape, values)
         compare_tf_with_tvm(
-            [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
+            [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
         )
     else:
         dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
             [sparse_indices, sparse_values, default_value],
             ["indices:0", "values:0", "default_value:0"],
             output.name,
+            mode="vm",
         )
 
 
@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None
 
     result = tf.sparse.to_dense(A_sp, default_value=default_value)
 
-    compare_tf_with_tvm([], [], result.name)
+    # The output shape depends on a dynamic input, use VM.
+    compare_tf_with_tvm([], [], result.name, mode="vm")
 
 
 def test_forward_sparse_to_dense_v2():
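
General note on the mode="vm" changes in patches 5 and 8: ops like unique and
sparse_to_dense produce shapes that depend on input values, and the graph
executor requires all shapes to be static at build time, so these tests have
to run on the Relay VM. A small self-contained illustration of the same
distinction, assuming an LLVM-enabled TVM build (arange stands in for the TF
ops; it is not part of the PR):

    import numpy as np
    import tvm
    from tvm import relay

    # The output length of arange depends on the runtime value of n,
    # so the module has a dynamic (Any) output shape.
    n = relay.var("n", shape=(), dtype="int32")
    mod = tvm.IRModule.from_expr(relay.arange(n, dtype="int32"))

    # relay.build(mod, ...) (the graph executor path) rejects this module;
    # the VM instead allocates the output at run time.
    vm = relay.create_executor("vm", mod=mod, target="llvm")
    print(vm.evaluate()(np.array(5, dtype="int32")))  # -> [0 1 2 3 4]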