-
Notifications
You must be signed in to change notification settings - Fork 3.5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Relay] Remove DynamicToStatic pass from graph runtime build #10691
Changes from all commits
63947be
670dfa2
464d686
2c8df8b
4d87e89
9190456
b15431d
adcbf96
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) { | |
// eta expand to support constructors in argument position | ||
pass_seqs.push_back(transform::EtaExpand( | ||
/* expand_constructor */ true, /* expand_global_var */ false)); | ||
} else { | ||
// DynamicToStatic runs FoldConstant, which affects SimplifyExpr below. | ||
// Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different | ||
// inputs from the ones when invoked via relay.build(...). | ||
// This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on | ||
// the structural hash of the input relay module (e.g. MetaScheduler). | ||
// TODO(masahi): Either remove DynamicToStatic below or always run it | ||
|
||
// Convert Dynamic ops to static versions | ||
pass_seqs.push_back(transform::DynamicToStatic()); | ||
} | ||
|
||
PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) { | ||
|
@@ -252,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) { | |
*rv = false; | ||
}); | ||
pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip)); | ||
pass_seqs.push_back(transform::SimplifyExpr()); | ||
pass_seqs.push_back(transform::CombineParallelConv2D(3)); | ||
pass_seqs.push_back(transform::CombineParallelDense(3)); | ||
pass_seqs.push_back(transform::CombineParallelBatchMatmul(3)); | ||
pass_seqs.push_back(transform::FoldConstant()); | ||
pass_seqs.push_back(transform::FoldScaleAxis()); | ||
pass_seqs.push_back(transform::SimplifyExpr()); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I realized that |
||
pass_seqs.push_back(transform::CanonicalizeCast()); | ||
pass_seqs.push_back(transform::CanonicalizeOps()); | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -147,6 +147,7 @@ def run_tvm_graph( | |
outputs=out_names, | ||
convert_config=convert_config, | ||
) | ||
|
||
dev = tvm.device(target, 0) | ||
if mode == "debug": | ||
inputs = [] | ||
|
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s | |
) | ||
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype)) | ||
|
||
# Output shape depends on a dynamic input, use VM. | ||
if default_value == None: | ||
output = tf.sparse_to_dense(indices, oshape, values) | ||
compare_tf_with_tvm( | ||
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name | ||
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm" | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This test was previously working on the graph runtime just because the dynamic input is bound to a constant tensor before the DynamicToStatic pass runs |
||
) | ||
else: | ||
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value") | ||
|
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s | |
[sparse_indices, sparse_values, default_value], | ||
["indices:0", "values:0", "default_value:0"], | ||
output.name, | ||
mode="vm", | ||
) | ||
|
||
|
||
|
@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None | |
|
||
result = tf.sparse.to_dense(A_sp, default_value=default_value) | ||
|
||
compare_tf_with_tvm([], [], result.name) | ||
# The output shape depends on a dynamic input, use VM. | ||
compare_tf_with_tvm([], [], result.name, mode="vm") | ||
|
||
|
||
def test_forward_sparse_to_dense_v2(): | ||
|
@@ -5572,7 +5576,7 @@ def _test_unique(n, dtype, is_dyn): | |
if is_dyn: | ||
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm") | ||
else: | ||
compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"]) | ||
compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm") | ||
|
||
|
||
def test_forward_unique(): | ||
|
@@ -5607,7 +5611,10 @@ def _test_unique_with_counts(n, dtype, is_dyn): | |
) | ||
else: | ||
compare_tf_with_tvm( | ||
None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"] | ||
np_data, | ||
"", | ||
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"], | ||
mode="vm", | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
) | ||
|
||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Before this PR, tflite mobilenet was returning a dynamic shape output 🤦♂️