diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index d09e2c8e8da0..7823682c9cc3 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1281,7 +1281,7 @@ def convert_prelu(g, op, block):
             shape = _op.strided_slice(shape_of(x), [0], [1])
         else:
             shape = _op.strided_slice(shape_of(x), [1], [2])
-        alpha = _op.broadcast_to(alpha, shape)
+        alpha = _op.broadcast_to(alpha, fold_constant(shape))
     out = _op.nn.prelu(x, alpha, axis)
     g.add_node(op.output("Out")[0], out)
 
diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 678dab36a659..37a8e459aadb 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -641,7 +641,7 @@ def full_impl(self, data, fill_value, dtype):
                 tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
             size = _op.concatenate(tmp, axis=0)
 
-        out = _op.full(_expr.const(fill_value), size, dtype=dtype)
+        out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
         if need_reshape:
             out = _op.reshape(out, new_shape)
         return out
diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index 4e4092b7b387..d430eaccbdc3 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -33,7 +33,7 @@
 from ..backend.name_transforms import sanitize_name
 from .common import ExprTable
 from .common import infer_shape as _infer_shape
-from .common import to_int_list
+from .common import to_int_list, shape_of
 from .tflite_flexbuffer import FlexBufferDecoder
 
 __all__ = ["from_tflite"]
@@ -846,7 +846,7 @@ def convert_shape(self, op):
         input_tensors = self.get_input_tensors(op)
         assert len(input_tensors) == 1, "input tensors length should be 1"
 
-        out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
+        out = shape_of(self.get_tensor_expr(input_tensors[0]))
 
         return out
 
diff --git a/src/relay/backend/utils.cc b/src/relay/backend/utils.cc
index a07e20bf1835..1cc726c59f65 100644
--- a/src/relay/backend/utils.cc
+++ b/src/relay/backend/utils.cc
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     // eta expand to support constructors in argument position
     pass_seqs.push_back(transform::EtaExpand(
         /* expand_constructor */ true, /* expand_global_var */ false));
-  } else {
-    // DynamicToStatic runs FoldConstant, which affects SimplifyExpr below.
-    // Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different
-    // inputs from the ones when invoked via relay.build(...).
-    // This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on
-    // the structual hash of the input relay module (e.g. MetaScheduler).
-    // TODO(masahi): Either remove DynamicToStatic below or always run it
-
-    // Convert Dynamic ops to static versions
-    pass_seqs.push_back(transform::DynamicToStatic());
   }
 
   PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) {
@@ -252,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     *rv = false;
   });
   pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip));
-  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CombineParallelConv2D(3));
   pass_seqs.push_back(transform::CombineParallelDense(3));
   pass_seqs.push_back(transform::CombineParallelBatchMatmul(3));
   pass_seqs.push_back(transform::FoldConstant());
   pass_seqs.push_back(transform::FoldScaleAxis());
+  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CanonicalizeCast());
   pass_seqs.push_back(transform::CanonicalizeOps());
 
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index f8e819dc015e..4988f57c24c4 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -147,6 +147,7 @@ def run_tvm_graph(
         outputs=out_names,
         convert_config=convert_config,
     )
+    dev = tvm.device(target, 0)
 
     if mode == "debug":
         inputs = []
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
         )
         oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
 
+        # Output shape depends on a dynamic input, use VM.
         if default_value == None:
             output = tf.sparse_to_dense(indices, oshape, values)
             compare_tf_with_tvm(
-                [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
+                [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
             )
         else:
             dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
                 [sparse_indices, sparse_values, default_value],
                 ["indices:0", "values:0", "default_value:0"],
                 output.name,
+                mode="vm",
             )
 
 
@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None
 
         result = tf.sparse.to_dense(A_sp, default_value=default_value)
 
-        compare_tf_with_tvm([], [], result.name)
+        # The output shape depends on a dynamic input, use VM.
+        compare_tf_with_tvm([], [], result.name, mode="vm")
 
 
 def test_forward_sparse_to_dense_v2():
@@ -5572,7 +5576,7 @@ def _test_unique(n, dtype, is_dyn):
         if is_dyn:
             compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
         else:
-            compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
+            compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")
 
 
 def test_forward_unique():
@@ -5607,7 +5611,10 @@ def _test_unique_with_counts(n, dtype, is_dyn):
             )
         else:
             compare_tf_with_tvm(
-                None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
+                np_data,
+                "",
+                ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
+                mode="vm",
             )
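
For readers tracing the src/relay/backend/utils.cc reordering above, here is a minimal Python-level sketch of the new non-VM pass prefix, with SimplifyExpr now placed after FoldConstant and FoldScaleAxis. It is illustrative only: the toy module, the opt_level choice, and the use of tvm.transform.Sequential are assumptions; relay.build() still goes through the C++ GetPassPrefix rather than this snippet, and the fskip callback passed to EliminateCommonSubexpr in utils.cc is omitted here.

import tvm
from tvm import relay

# Approximates the reordered pass prefix. Passes registered above the chosen
# opt_level (e.g. the CombineParallel* passes at level 4) are skipped by Sequential.
prefix = tvm.transform.Sequential(
    [
        relay.transform.EliminateCommonSubexpr(),
        relay.transform.CombineParallelConv2D(min_num_branches=3),
        relay.transform.CombineParallelDense(min_num_branches=3),
        relay.transform.CombineParallelBatchMatmul(min_num_branches=3),
        relay.transform.FoldConstant(),
        relay.transform.FoldScaleAxis(),
        relay.transform.SimplifyExpr(),  # moved after the folding passes
        relay.transform.CanonicalizeCast(),
        relay.transform.CanonicalizeOps(),
    ]
)

# Toy module: x + 0.0 gives FoldConstant and SimplifyExpr something to chew on.
x = relay.var("x", shape=(1, 3, 8, 8))
mod = tvm.IRModule.from_expr(relay.nn.relu(x + relay.const(0.0)))
with tvm.transform.PassContext(opt_level=3):
    mod = prefix(mod)
print(mod)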